Columns:
  source (string, 3 to 92 chars): source file name
  c (string, 26 to 2.25M chars): file contents
ast-dump-openmp-atomic.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s

void test(int i) {
#pragma omp atomic
  ++i;
}

// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-atomic.c:3:1, line:6:1> line:3:6 test 'void (int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:11, col:15> col:15 used i 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:18, line:6:1>
// CHECK-NEXT: `-OMPAtomicDirective {{.*}} <line:4:1, col:19>
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:3, col:5>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | |-UnaryOperator {{.*}} <col:3, col:5> 'int' prefix '++'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <col:5> 'int' lvalue ParmVar {{.*}} 'i' 'int'
// CHECK-NEXT: | `-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-atomic.c:4:1) *const restrict'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:5:5> 'int' lvalue ParmVar {{.*}} 'i' 'int'
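For context, "#pragma omp atomic" with no clause defaults to an atomic update, which is why the ++i statement appears under the OMPAtomicDirective in the dump above. A minimal hedged C sketch of the main clause forms of the directive (illustration only, not part of the test file above; compile with -fopenmp):

#include <stdio.h>

int main(void)
{
    int x = 0, v;

#pragma omp atomic write    /* atomic store */
    x = 1;
#pragma omp atomic update   /* atomic read-modify-write; "update" is the default */
    x++;
#pragma omp atomic read     /* atomic load */
    v = x;
#pragma omp atomic capture  /* update x and capture the new value into v */
    v = ++x;

    printf("x=%d v=%d\n", x, v);
    return 0;
}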
diagsm_x_sky_u_row.c
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" #ifdef _OPENMP #include <omp.h> #endif alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_SKY *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy) { ALPHA_INT num_thread = alpha_get_thread_num(); #ifdef _OPENMP #pragma omp parallel for num_threads(num_thread) #endif for (ALPHA_INT r = 0; r < A->rows; ++r) { for (ALPHA_INT c = 0; c < columns; ++c) { alpha_mul(y[index2(r, c, ldy)], alpha, x[index2(r, c, ldx)]); } } return ALPHA_SPARSE_STATUS_SUCCESS; }
mscash1_fmt_plug.c
/* MSCASH patch for john (performance improvement)
 *
 * Modified for utf-8 support by magnum in 2011, same terms as below
 *
 * Written by Alain Espinosa <alainesp at gmail.com> in 2007. No copyright
 * is claimed, and the software is hereby placed in the public domain.
 * In case this attempt to disclaim copyright and place the software in the
 * public domain is deemed null and void, then the software is
 * Copyright (c) 2007 Alain Espinosa and it is hereby released to the
 * general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 *
 * There's ABSOLUTELY NO WARRANTY, express or implied.
 *
 * (This is a heavily cut-down "BSD license".)
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_mscash;
#elif FMT_REGISTERS_H
john_register_one(&fmt_mscash);
#else

#include <string.h>

#include "arch.h"
#include "misc.h"
#include "memory.h"
#include "common.h"
#include "formats.h"
#include "unicode.h"
#include "options.h"
#include "loader.h"
#include "johnswap.h"
#include "mscash_common.h"

#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 192
#endif
#endif

#include "memdbg.h"

#define FORMAT_LABEL            "mscash"
#define FORMAT_NAME             "MS Cache Hash (DCC)"
#define ALGORITHM_NAME          "MD4 32/" ARCH_BITS_STR

#define PLAINTEXT_LENGTH        27
#define SALT_SIZE               (11*4)

#define OK_NUM_KEYS             64
#define BEST_NUM_KEYS           512
#ifdef _OPENMP
#define MS_NUM_KEYS             OK_NUM_KEYS
#else
#define MS_NUM_KEYS             BEST_NUM_KEYS
#endif
#define MIN_KEYS_PER_CRYPT      OK_NUM_KEYS
#define MAX_KEYS_PER_CRYPT      MS_NUM_KEYS

static unsigned int *ms_buffer1x;
static unsigned int *output1x;
static unsigned int *crypt_out;
static unsigned int *last;
static unsigned int *last_i;
static unsigned int *salt_buffer;
static unsigned int new_key;

//Init values
#define INIT_A 0x67452301
#define INIT_B 0xefcdab89
#define INIT_C 0x98badcfe
#define INIT_D 0x10325476

#define SQRT_2 0x5a827999
#define SQRT_3 0x6ed9eba1

static void set_key_utf8(char *_key, int index);
static void set_key_encoding(char *_key, int index);

struct fmt_main fmt_mscash;

#if !ARCH_LITTLE_ENDIAN
inline static void swap(unsigned int *x, int count)
{
    while (count--) {
        *x = JOHNSWAP(*x);
        x++;
    }
}
#endif

static void init(struct fmt_main *self)
{
#ifdef _OPENMP
    int omp_t = omp_get_max_threads();

    self->params.min_keys_per_crypt *= omp_t;
    omp_t *= OMP_SCALE;
    fmt_mscash.params.max_keys_per_crypt *= omp_t;
#endif

    ms_buffer1x = mem_calloc(sizeof(ms_buffer1x[0]), 16*fmt_mscash.params.max_keys_per_crypt);
    output1x    = mem_calloc(sizeof(output1x[0]),     4*fmt_mscash.params.max_keys_per_crypt);
    crypt_out   = mem_calloc(sizeof(crypt_out[0]),    4*fmt_mscash.params.max_keys_per_crypt);
    last        = mem_calloc(sizeof(last[0]),         4*fmt_mscash.params.max_keys_per_crypt);
    last_i      = mem_calloc(sizeof(last_i[0]),         fmt_mscash.params.max_keys_per_crypt);
    new_key = 1;

    mscash1_adjust_tests(self, options.target_enc, PLAINTEXT_LENGTH,
                         set_key_utf8, set_key_encoding);
}

static void done(void)
{
    MEM_FREE(last_i);
    MEM_FREE(last);
    MEM_FREE(crypt_out);
    MEM_FREE(output1x);
    MEM_FREE(ms_buffer1x);
}

static void set_salt(void *salt)
{
    salt_buffer = salt;
}

static void *get_salt(char *ciphertext)
{
    unsigned char input[19*3+1];
    int i, utf16len;
    static UTF16 *out = 0;
    char *lasth = strrchr(ciphertext, '#');

    if (!out)
        out = mem_alloc_tiny(22*sizeof(UTF16), MEM_ALIGN_WORD);
    memset(out, 0, 22*sizeof(UTF16));

    ciphertext += FORMAT_TAG_LEN;
    for (i = 0; &ciphertext[i] < lasth; i++)
        input[i] = ciphertext[i];
    input[i] = 0;

    utf16len = enc_to_utf16(out, 19, input, i);
    if (utf16len < 0)
        utf16len = strlen16(out);
#if ARCH_LITTLE_ENDIAN
    out[utf16len] = 0x80;
#else
    out[utf16len] = 0x8000;
    swap((unsigned int*)out, (i>>1)+1);
#endif

    ((unsigned int*)out)[10] = (8 + utf16len) << 4;

//  dump_stuff(out, 44);

    return out;
}

static void *get_binary(char *ciphertext)
{
    static unsigned int out[BINARY_SIZE/sizeof(unsigned int)];
    unsigned int i = 0;
    unsigned int temp;
    unsigned int *salt = fmt_mscash.methods.salt(ciphertext);

    /* We need to allow salt containing '#' so we search backwards */
    ciphertext = strrchr(ciphertext, '#') + 1;

    for (; i < 4; i++) {
        temp  = ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+0])]))<<4;
        temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+1])]));
        temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+2])]))<<12;
        temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+3])]))<<8;
        temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+4])]))<<20;
        temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+5])]))<<16;
        temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+6])]))<<28;
        temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+7])]))<<24;
        out[i] = temp;
    }

    out[0] -= INIT_A;
    out[1] -= INIT_B;
    out[2] -= INIT_C;
    out[3] -= INIT_D;

    // Reversed b += (c ^ d ^ a) + salt_buffer[11] + SQRT_3; b = (b << 15) | (b >> 17);
    out[1]  = (out[1] >> 15) | (out[1] << 17);
    out[1] -= SQRT_3 + (out[2] ^ out[3] ^ out[0]);
    // Reversed c += (d ^ a ^ b) + salt_buffer[3] + SQRT_3; c = (c << 11) | (c >> 21);
    out[2]  = (out[2] << 21) | (out[2] >> 11);
    out[2] -= SQRT_3 + (out[3] ^ out[0] ^ out[1]) + salt[3];
    // Reversed d += (a ^ b ^ c) + salt_buffer[7] + SQRT_3; d = (d << 9 ) | (d >> 23);
    out[3]  = (out[3] << 23) | (out[3] >> 9);
    out[3] -= SQRT_3 + (out[0] ^ out[1] ^ out[2]) + salt[7];
    //+ SQRT_3; d = (d << 9 ) | (d >> 23);
    out[3]  = (out[3] << 23) | (out[3] >> 9);
    out[3] -= SQRT_3;

    return out;
}

static int binary_hash_0(void *binary) { return ((unsigned int*)binary)[3] & PH_MASK_0; }
static int binary_hash_1(void *binary) { return ((unsigned int*)binary)[3] & PH_MASK_1; }
static int binary_hash_2(void *binary) { return ((unsigned int*)binary)[3] & PH_MASK_2; }
static int binary_hash_3(void *binary) { return ((unsigned int*)binary)[3] & PH_MASK_3; }
static int binary_hash_4(void *binary) { return ((unsigned int*)binary)[3] & PH_MASK_4; }
static int binary_hash_5(void *binary) { return ((unsigned int*)binary)[3] & PH_MASK_5; }
static int binary_hash_6(void *binary) { return ((unsigned int*)binary)[3] & PH_MASK_6; }

static int get_hash_0(int index) { return output1x[4*index+3] & PH_MASK_0; }
static int get_hash_1(int index) { return output1x[4*index+3] & PH_MASK_1; }
static int get_hash_2(int index) { return output1x[4*index+3] & PH_MASK_2; }
static int get_hash_3(int index) { return output1x[4*index+3] & PH_MASK_3; }
static int get_hash_4(int index) { return output1x[4*index+3] & PH_MASK_4; }
static int get_hash_5(int index) { return output1x[4*index+3] & PH_MASK_5; }
static int get_hash_6(int index) { return output1x[4*index+3] & PH_MASK_6; }

static void nt_hash(int count)
{
    int i;

#if MS_NUM_KEYS > 1 && defined(_OPENMP)
#pragma omp parallel for default(none) private(i) shared(count, ms_buffer1x, crypt_out, last)
#endif
    for (i = 0; i < count; i++) {
        unsigned int a;
        unsigned int b;
        unsigned int c;
        unsigned int d;

        /* Round 1 */
        a = 0xFFFFFFFF + ms_buffer1x[16*i+0]; a = (a << 3 ) | (a >> 29);
        d = INIT_D + (INIT_C ^ (a & 0x77777777)) + ms_buffer1x[16*i+1]; d = (d << 7 ) | (d >> 25);
        c = INIT_C + (INIT_B ^ (d & (a ^ INIT_B))) + ms_buffer1x[16*i+2]; c = (c << 11) | (c >> 21);
        b = INIT_B + (a ^ (c & (d ^ a))) + ms_buffer1x[16*i+3]; b = (b << 19) | (b >> 13);

        a += (d ^ (b & (c ^ d))) + ms_buffer1x[16*i+4]; a = (a << 3 ) | (a >> 29);
        d += (c ^ (a & (b ^ c))) + ms_buffer1x[16*i+5]; d = (d << 7 ) | (d >> 25);
        c += (b ^ (d & (a ^ b))) + ms_buffer1x[16*i+6]; c = (c << 11) | (c >> 21);
        b += (a ^ (c & (d ^ a))) + ms_buffer1x[16*i+7]; b = (b << 19) | (b >> 13);

        a += (d ^ (b & (c ^ d))) + ms_buffer1x[16*i+8]; a = (a << 3 ) | (a >> 29);
        d += (c ^ (a & (b ^ c))) + ms_buffer1x[16*i+9]; d = (d << 7 ) | (d >> 25);
        c += (b ^ (d & (a ^ b))) + ms_buffer1x[16*i+10]; c = (c << 11) | (c >> 21);
        b += (a ^ (c & (d ^ a))) + ms_buffer1x[16*i+11]; b = (b << 19) | (b >> 13);

        a += (d ^ (b & (c ^ d))) + ms_buffer1x[16*i+12]; a = (a << 3 ) | (a >> 29);
        d += (c ^ (a & (b ^ c))) + ms_buffer1x[16*i+13]; d = (d << 7 ) | (d >> 25);
        c += (b ^ (d & (a ^ b))) + ms_buffer1x[16*i+14]; c = (c << 11) | (c >> 21);
        b += (a ^ (c & (d ^ a)))/*+ms_buffer1x[16*i+15]*/; b = (b << 19) | (b >> 13);

        /* Round 2 */
        a += ((b & (c | d)) | (c & d)) + ms_buffer1x[16*i+0]  + SQRT_2; a = (a << 3 ) | (a >> 29);
        d += ((a & (b | c)) | (b & c)) + ms_buffer1x[16*i+4]  + SQRT_2; d = (d << 5 ) | (d >> 27);
        c += ((d & (a | b)) | (a & b)) + ms_buffer1x[16*i+8]  + SQRT_2; c = (c << 9 ) | (c >> 23);
        b += ((c & (d | a)) | (d & a)) + ms_buffer1x[16*i+12] + SQRT_2; b = (b << 13) | (b >> 19);

        a += ((b & (c | d)) | (c & d)) + ms_buffer1x[16*i+1]  + SQRT_2; a = (a << 3 ) | (a >> 29);
        d += ((a & (b | c)) | (b & c)) + ms_buffer1x[16*i+5]  + SQRT_2; d = (d << 5 ) | (d >> 27);
        c += ((d & (a | b)) | (a & b)) + ms_buffer1x[16*i+9]  + SQRT_2; c = (c << 9 ) | (c >> 23);
        b += ((c & (d | a)) | (d & a)) + ms_buffer1x[16*i+13] + SQRT_2; b = (b << 13) | (b >> 19);

        a += ((b & (c | d)) | (c & d)) + ms_buffer1x[16*i+2]  + SQRT_2; a = (a << 3 ) | (a >> 29);
        d += ((a & (b | c)) | (b & c)) + ms_buffer1x[16*i+6]  + SQRT_2; d = (d << 5 ) | (d >> 27);
        c += ((d & (a | b)) | (a & b)) + ms_buffer1x[16*i+10] + SQRT_2; c = (c << 9 ) | (c >> 23);
        b += ((c & (d | a)) | (d & a)) + ms_buffer1x[16*i+14] + SQRT_2; b = (b << 13) | (b >> 19);

        a += ((b & (c | d)) | (c & d)) + ms_buffer1x[16*i+3]  + SQRT_2; a = (a << 3 ) | (a >> 29);
        d += ((a & (b | c)) | (b & c)) + ms_buffer1x[16*i+7]  + SQRT_2; d = (d << 5 ) | (d >> 27);
        c += ((d & (a | b)) | (a & b)) + ms_buffer1x[16*i+11] + SQRT_2; c = (c << 9 ) | (c >> 23);
        b += ((c & (d | a)) | (d & a))/*+ms_buffer1x[16*i+15]*/ + SQRT_2; b = (b << 13) | (b >> 19);

        /* Round 3 */
        a += (b ^ c ^ d) + ms_buffer1x[16*i+0]  + SQRT_3; a = (a << 3 ) | (a >> 29);
        d += (a ^ b ^ c) + ms_buffer1x[16*i+8]  + SQRT_3; d = (d << 9 ) | (d >> 23);
        c += (d ^ a ^ b) + ms_buffer1x[16*i+4]  + SQRT_3; c = (c << 11) | (c >> 21);
        b += (c ^ d ^ a) + ms_buffer1x[16*i+12] + SQRT_3; b = (b << 15) | (b >> 17);

        a += (b ^ c ^ d) + ms_buffer1x[16*i+2]  + SQRT_3; a = (a << 3 ) | (a >> 29);
        d += (a ^ b ^ c) + ms_buffer1x[16*i+10] + SQRT_3; d = (d << 9 ) | (d >> 23);
        c += (d ^ a ^ b) + ms_buffer1x[16*i+6]  + SQRT_3; c = (c << 11) | (c >> 21);
        b += (c ^ d ^ a) + ms_buffer1x[16*i+14] + SQRT_3; b = (b << 15) | (b >> 17);

        a += (b ^ c ^ d) + ms_buffer1x[16*i+1]  + SQRT_3; a = (a << 3 ) | (a >> 29);
        d += (a ^ b ^ c) + ms_buffer1x[16*i+9]  + SQRT_3; d = (d << 9 ) | (d >> 23);
        c += (d ^ a ^ b) + ms_buffer1x[16*i+5]  + SQRT_3; c = (c << 11) | (c >> 21);
        b += (c ^ d ^ a) + ms_buffer1x[16*i+13] + SQRT_3; b = (b << 15) | (b >> 17);

        a += (b ^ c ^ d) + ms_buffer1x[16*i+3]  + SQRT_3; a = (a << 3 ) | (a >> 29);
        d += (a ^ b ^ c) + ms_buffer1x[16*i+11] + SQRT_3; d = (d << 9 ) | (d >> 23);
        c += (d ^ a ^ b) + ms_buffer1x[16*i+7]  + SQRT_3; c = (c << 11) | (c >> 21);
        b += (c ^ d ^ a) /*+ ms_buffer1x[16*i+15] */ + SQRT_3; b = (b << 15) | (b >> 17);

        crypt_out[4*i+0] = a + INIT_A;
        crypt_out[4*i+1] = b + INIT_B;
        crypt_out[4*i+2] = c + INIT_C;
        crypt_out[4*i+3] = d + INIT_D;

        //Another MD4_crypt for the salt
        /* Round 1 */
        a = 0xFFFFFFFF + crypt_out[4*i+0]; a = (a << 3 ) | (a >> 29);
        d = INIT_D + (INIT_C ^ (a & 0x77777777)) + crypt_out[4*i+1]; d = (d << 7 ) | (d >> 25);
        c = INIT_C + (INIT_B ^ (d & (a ^ INIT_B))) + crypt_out[4*i+2]; c = (c << 11) | (c >> 21);
        b = INIT_B + (a ^ (c & (d ^ a))) + crypt_out[4*i+3]; b = (b << 19) | (b >> 13);

        last[4*i+0] = a;
        last[4*i+1] = b;
        last[4*i+2] = c;
        last[4*i+3] = d;
    }
}

static int crypt_all(int *pcount, struct db_salt *salt)
{
    int count = *pcount;
    int i;

    if (new_key) {
        new_key = 0;
        nt_hash(count);
    }

#if MS_NUM_KEYS > 1 && defined(_OPENMP)
#pragma omp parallel for default(none) private(i) shared(count, last, crypt_out, salt_buffer, output1x)
#endif
    for (i = 0; i < count; i++) {
        unsigned int a;
        unsigned int b;
        unsigned int c;
        unsigned int d;

        a = last[4*i+0];
        b = last[4*i+1];
        c = last[4*i+2];
        d = last[4*i+3];

        a += (d ^ (b & (c ^ d))) + salt_buffer[0]; a = (a << 3 ) | (a >> 29);
        d += (c ^ (a & (b ^ c))) + salt_buffer[1]; d = (d << 7 ) | (d >> 25);
        c += (b ^ (d & (a ^ b))) + salt_buffer[2]; c = (c << 11) | (c >> 21);
        b += (a ^ (c & (d ^ a))) + salt_buffer[3]; b = (b << 19) | (b >> 13);

        a += (d ^ (b & (c ^ d))) + salt_buffer[4]; a = (a << 3 ) | (a >> 29);
        d += (c ^ (a & (b ^ c))) + salt_buffer[5]; d = (d << 7 ) | (d >> 25);
        c += (b ^ (d & (a ^ b))) + salt_buffer[6]; c = (c << 11) | (c >> 21);
        b += (a ^ (c & (d ^ a))) + salt_buffer[7]; b = (b << 19) | (b >> 13);

        a += (d ^ (b & (c ^ d))) + salt_buffer[8]; a = (a << 3 ) | (a >> 29);
        d += (c ^ (a & (b ^ c))) + salt_buffer[9]; d = (d << 7 ) | (d >> 25);
        c += (b ^ (d & (a ^ b))) + salt_buffer[10]; c = (c << 11) | (c >> 21);
        b += (a ^ (c & (d ^ a)))/*+salt_buffer[11]*/; b = (b << 19) | (b >> 13);

        /* Round 2 */
        a += ((b & (c | d)) | (c & d)) + crypt_out[4*i+0] + SQRT_2; a = (a << 3 ) | (a >> 29);
        d += ((a & (b | c)) | (b & c)) + salt_buffer[0]   + SQRT_2; d = (d << 5 ) | (d >> 27);
        c += ((d & (a | b)) | (a & b)) + salt_buffer[4]   + SQRT_2; c = (c << 9 ) | (c >> 23);
        b += ((c & (d | a)) | (d & a)) + salt_buffer[8]   + SQRT_2; b = (b << 13) | (b >> 19);

        a += ((b & (c | d)) | (c & d)) + crypt_out[4*i+1] + SQRT_2; a = (a << 3 ) | (a >> 29);
        d += ((a & (b | c)) | (b & c)) + salt_buffer[1]   + SQRT_2; d = (d << 5 ) | (d >> 27);
        c += ((d & (a | b)) | (a & b)) + salt_buffer[5]   + SQRT_2; c = (c << 9 ) | (c >> 23);
        b += ((c & (d | a)) | (d & a)) + salt_buffer[9]   + SQRT_2; b = (b << 13) | (b >> 19);

        a += ((b & (c | d)) | (c & d)) + crypt_out[4*i+2] + SQRT_2; a = (a << 3 ) | (a >> 29);
        d += ((a & (b | c)) | (b & c)) + salt_buffer[2]   + SQRT_2; d = (d << 5 ) | (d >> 27);
        c += ((d & (a | b)) | (a & b)) + salt_buffer[6]   + SQRT_2; c = (c << 9 ) | (c >> 23);
        b += ((c & (d | a)) | (d & a)) + salt_buffer[10]  + SQRT_2; b = (b << 13) | (b >> 19);

        a += ((b & (c | d)) | (c & d)) + crypt_out[4*i+3] + SQRT_2; a = (a << 3 ) | (a >> 29);
        d += ((a & (b | c)) | (b & c)) + salt_buffer[3]   + SQRT_2; d = (d << 5 ) | (d >> 27);
        c += ((d & (a | b)) | (a & b)) + salt_buffer[7]   + SQRT_2; c = (c << 9 ) | (c >> 23);
        b += ((c & (d | a)) | (d & a))/*+ salt_buffer[11]*/ + SQRT_2; b = (b << 13) | (b >> 19);

        /* Round 3 */
        a += (b ^ c ^ d) + crypt_out[4*i+0] + SQRT_3; a = (a << 3 ) | (a >> 29);
        d += (a ^ b ^ c) + salt_buffer[4]   + SQRT_3; d = (d << 9 ) | (d >> 23);
        c += (d ^ a ^ b) + salt_buffer[0]   + SQRT_3; c = (c << 11) | (c >> 21);
        b += (c ^ d ^ a) + salt_buffer[8]   + SQRT_3; b = (b << 15) | (b >> 17);

        a += (b ^ c ^ d) + crypt_out[4*i+2] + SQRT_3; a = (a << 3 ) | (a >> 29);
        d += (a ^ b ^ c) + salt_buffer[6]   + SQRT_3; d = (d << 9 ) | (d >> 23);
        c += (d ^ a ^ b) + salt_buffer[2]   + SQRT_3; c = (c << 11) | (c >> 21);
        b += (c ^ d ^ a) + salt_buffer[10]  + SQRT_3; b = (b << 15) | (b >> 17);

        a += (b ^ c ^ d) + crypt_out[4*i+1] + SQRT_3; a = (a << 3 ) | (a >> 29);
        d += (a ^ b ^ c) + salt_buffer[5];

        output1x[4*i+0] = a;
        output1x[4*i+1] = b;
        output1x[4*i+2] = c;
        output1x[4*i+3] = d;
    }

    return count;
}

static int cmp_all(void *binary, int count)
{
    unsigned int i = 0;
    unsigned int d = ((unsigned int *)binary)[3];

    for (; i < count; i++)
        if (d == output1x[i*4+3])
            return 1;

    return 0;
}

static int cmp_one(void *binary, int index)
{
    unsigned int *t = (unsigned int *)binary;
    unsigned int a = output1x[4*index+0];
    unsigned int b = output1x[4*index+1];
    unsigned int c = output1x[4*index+2];
    unsigned int d = output1x[4*index+3];

    if (d != t[3])
        return 0;
    d += SQRT_3; d = (d << 9 ) | (d >> 23);

    c += (d ^ a ^ b) + salt_buffer[1] + SQRT_3; c = (c << 11) | (c >> 21);
    if (c != t[2])
        return 0;

    b += (c ^ d ^ a) + salt_buffer[9] + SQRT_3; b = (b << 15) | (b >> 17);
    if (b != t[1])
        return 0;

    a += (b ^ c ^ d) + crypt_out[4*index+3] + SQRT_3; a = (a << 3 ) | (a >> 29);
    return (a == t[0]);
}

static int cmp_exact(char *source, int index)
{
    // This check is for the unreal case of collisions.
    // It verifies that the salts are the same.
    unsigned int *salt = fmt_mscash.methods.salt(source);
    unsigned int i = 0;

    for (; i < 11; i++)
        if (salt[i] != salt_buffer[i])
            return 0;
    return 1;
}

// This is common code for the SSE/MMX/generic variants of non-UTF8 set_key
inline static void set_key_helper(unsigned int *keybuffer,
                                  unsigned int xBuf,
                                  const unsigned char *key,
                                  unsigned int lenStoreOffset,
                                  unsigned int *last_length)
{
    unsigned int i = 0;
    unsigned int md4_size = 0;

    for (; key[md4_size] && md4_size < PLAINTEXT_LENGTH; i += xBuf, md4_size++) {
        unsigned int temp;

        if ((temp = key[++md4_size])) {
            keybuffer[i] = key[md4_size-1] | (temp << 16);
        } else {
            keybuffer[i] = key[md4_size-1] | 0x800000;
            goto key_cleaning;
        }
    }
    keybuffer[i] = 0x80;

key_cleaning:
    i += xBuf;
    for (; i <= *last_length; i += xBuf)
        keybuffer[i] = 0;

    *last_length = (md4_size >> 1) + 1;

    keybuffer[lenStoreOffset] = md4_size << 4;
}

static void set_key(char *_key, int index)
{
    set_key_helper(&ms_buffer1x[index << 4], 1, (unsigned char *)_key, 14,
                   &last_i[index]);
    //new password_candidate
    new_key = 1;
}

// UTF-8 conversion right into key buffer
// This is common code for the SSE/MMX/generic variants
inline static void set_key_helper_utf8(unsigned int *keybuffer, unsigned int xBuf,
                                       const UTF8 *source, unsigned int lenStoreOffset,
                                       unsigned int *lastlen)
{
    unsigned int *target = keybuffer;
    UTF32 chl, chh = 0x80;
    unsigned int outlen = 0;

    while (*source) {
        chl = *source;
        if (chl >= 0xC0) {
            unsigned int extraBytesToRead = opt_trailingBytesUTF8[chl & 0x3f];

            switch (extraBytesToRead) {
            case 3:
                ++source;
                if (*source) {
                    chl <<= 6;
                    chl += *source;
                } else {
                    *lastlen = ((PLAINTEXT_LENGTH >> 1) + 1) * xBuf;
                    return;
                }
            case 2:
                ++source;
                if (*source) {
                    chl <<= 6;
                    chl += *source;
                } else {
                    *lastlen = ((PLAINTEXT_LENGTH >> 1) + 1) * xBuf;
                    return;
                }
            case 1:
                ++source;
                if (*source) {
                    chl <<= 6;
                    chl += *source;
                } else {
                    *lastlen = ((PLAINTEXT_LENGTH >> 1) + 1) * xBuf;
                    return;
                }
            case 0:
                break;
            default:
                *lastlen = ((PLAINTEXT_LENGTH >> 1) + 1) * xBuf;
                return;
            }
            chl -= offsetsFromUTF8[extraBytesToRead];
        }
        source++;
        outlen++;
        if (chl > UNI_MAX_BMP) {
            if (outlen == PLAINTEXT_LENGTH) {
                chh = 0x80;
                *target = (chh << 16) | chl;
                target += xBuf;
                *lastlen = ((PLAINTEXT_LENGTH >> 1) + 1) * xBuf;
                break;
            }
#define halfBase 0x0010000UL
#define halfShift 10
#define halfMask 0x3FFUL
#define UNI_SUR_HIGH_START (UTF32)0xD800
#define UNI_SUR_LOW_START (UTF32)0xDC00
            chl -= halfBase;
            chh = (UTF16)((chl & halfMask) + UNI_SUR_LOW_START);
            chl = (UTF16)((chl >> halfShift) + UNI_SUR_HIGH_START);
            outlen++;
        } else if (*source && outlen < PLAINTEXT_LENGTH) {
            chh = *source;
            if (chh >= 0xC0) {
                unsigned int extraBytesToRead = opt_trailingBytesUTF8[chh & 0x3f];

                switch (extraBytesToRead) {
                case 3:
                    ++source;
                    if (*source) {
                        chl <<= 6;
                        chl += *source;
                    } else {
                        *lastlen = ((PLAINTEXT_LENGTH >> 1) + 1) * xBuf;
                        return;
                    }
                case 2:
                    ++source;
                    if (*source) {
                        chh <<= 6;
                        chh += *source;
                    } else {
                        *lastlen = ((PLAINTEXT_LENGTH >> 1) + 1) * xBuf;
                        return;
                    }
                case 1:
                    ++source;
                    if (*source) {
                        chh <<= 6;
                        chh += *source;
                    } else {
                        *lastlen = ((PLAINTEXT_LENGTH >> 1) + 1) * xBuf;
                        return;
                    }
                case 0:
                    break;
                default:
                    *lastlen = ((PLAINTEXT_LENGTH >> 1) + 1) * xBuf;
                    return;
                }
                chh -= offsetsFromUTF8[extraBytesToRead];
            }
            source++;
            outlen++;
        } else {
            chh = 0x80;
            *target = chh << 16 | chl;
            target += xBuf;
            break;
        }
        *target = chh << 16 | chl;
        target += xBuf;
    }
    if (chh != 0x80 || outlen == 0) {
        *target = 0x80;
        target += xBuf;
    }

    while (target < &keybuffer[*lastlen]) {
        *target = 0;
        target += xBuf;
    }

    *lastlen = ((outlen >> 1) + 1) * xBuf;

    keybuffer[lenStoreOffset] = outlen << 4;
}

static void set_key_utf8(char *_key, int index)
{
    set_key_helper_utf8(&ms_buffer1x[index << 4], 1, (UTF8 *)_key, 14,
                        &last_i[index]);
    //new password_candidate
    new_key = 1;
}

// This is common code for the SSE/MMX/generic variants of non-UTF8 non-ISO-8859-1 set_key
inline static void set_key_helper_encoding(unsigned int *keybuffer,
                                           unsigned int xBuf,
                                           const unsigned char *key,
                                           unsigned int lenStoreOffset,
                                           unsigned int *last_length)
{
    unsigned int i = 0;
    int md4_size;

    md4_size = enc_to_utf16((UTF16 *)keybuffer, PLAINTEXT_LENGTH, (UTF8 *)key,
                            strlen((char *)key));
    if (md4_size < 0)
        md4_size = strlen16((UTF16 *)keybuffer);

#if ARCH_LITTLE_ENDIAN
    ((UTF16 *)keybuffer)[md4_size] = 0x80;
#else
    ((UTF16 *)keybuffer)[md4_size] = 0x8000;
#endif
    ((UTF16 *)keybuffer)[md4_size+1] = 0;
#if !ARCH_LITTLE_ENDIAN
    ((UTF16 *)keybuffer)[md4_size+2] = 0;
#endif

    i = md4_size >> 1;
    i += xBuf;
    for (; i <= *last_length; i += xBuf)
        keybuffer[i] = 0;

#if !ARCH_LITTLE_ENDIAN
    swap(keybuffer, (md4_size >> 1) + 1);
#endif

    *last_length = (md4_size >> 1) + 1;

    keybuffer[lenStoreOffset] = md4_size << 4;
}

static void set_key_encoding(char *_key, int index)
{
    set_key_helper_encoding(&ms_buffer1x[index << 4], 1, (unsigned char *)_key,
                            14, &last_i[index]);
    //new password_candidate
    new_key = 1;
}

// Get the key back from the key buffer, from UCS-2 LE
static char *get_key(int index)
{
    static union {
        UTF16 u16[PLAINTEXT_LENGTH + 1];
        unsigned int u32[(PLAINTEXT_LENGTH + 1 + 1) / 2];
    } key;
    unsigned int *keybuffer = &ms_buffer1x[index << 4];
    unsigned int md4_size;
    unsigned int i = 0;
    int len = keybuffer[14] >> 4;

    for (md4_size = 0; md4_size < len; i++, md4_size += 2) {
#if ARCH_LITTLE_ENDIAN
        key.u16[md4_size]   = keybuffer[i];
        key.u16[md4_size+1] = keybuffer[i] >> 16;
#else
        key.u16[md4_size]   = keybuffer[i] >> 16;
        key.u16[md4_size+1] = keybuffer[i];
#endif
    }

#if !ARCH_LITTLE_ENDIAN
    swap(key.u32, md4_size >> 1);
#endif
    key.u16[len] = 0x00;

    return (char *)utf16_to_enc(key.u16);
}

// Public domain hash function by DJ Bernstein (salt is a username)
static int salt_hash(void *salt)
{
    UTF16 *s = salt;
    unsigned int hash = 5381;

    while (*s != 0x80)
        hash = ((hash << 5) + hash) ^ *s++;

    return hash & (SALT_HASH_SIZE - 1);
}

struct fmt_main fmt_mscash = {
    {
        FORMAT_LABEL,
        FORMAT_NAME,
        ALGORITHM_NAME,
        BENCHMARK_COMMENT,
        BENCHMARK_LENGTH,
        0,
        PLAINTEXT_LENGTH,
        BINARY_SIZE,
        BINARY_ALIGN,
        SALT_SIZE,
        SALT_ALIGN,
        MIN_KEYS_PER_CRYPT,
        MAX_KEYS_PER_CRYPT,
        FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP | FMT_UNICODE | FMT_UTF8,
        { NULL },
        { FORMAT_TAG },
        mscash1_common_tests
    }, {
        init,
        done,
        fmt_default_reset,
        mscash1_common_prepare,
        mscash1_common_valid,
        mscash1_common_split,
        get_binary, // NOTE, not using the 'common' binary function.
        get_salt,
        { NULL },
        fmt_default_source,
        {
            binary_hash_0,
            binary_hash_1,
            binary_hash_2,
            binary_hash_3,
            binary_hash_4,
            binary_hash_5,
            binary_hash_6
        },
        salt_hash,
        NULL,
        set_salt,
        set_key,
        get_key,
        fmt_default_clear_keys,
        crypt_all,
        {
            get_hash_0,
            get_hash_1,
            get_hash_2,
            get_hash_3,
            get_hash_4,
            get_hash_5,
            get_hash_6
        },
        cmp_all,
        cmp_one,
        cmp_exact
    }
};

#endif /* plugin stanza */
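The salt_hash above is Bernstein's djb2 hash in its XOR variant, run over UTF-16 code units until the 0x80 pad word that terminates john's salt buffer. A hedged standalone sketch for experimentation (SALT_HASH_SIZE is defined by john's headers; the value below is an assumption for illustration):

#include <stdio.h>

typedef unsigned short UTF16;

#define SALT_HASH_SIZE 4096  /* assumed for illustration; the real value comes from john's headers */

static unsigned int djb2_xor_utf16(const UTF16 *s)
{
    unsigned int hash = 5381;

    while (*s != 0x80)  /* the salt is terminated by the 0x80 pad word */
        hash = ((hash << 5) + hash) ^ *s++;

    return hash & (SALT_HASH_SIZE - 1);
}

int main(void)
{
    UTF16 user[] = { 'a', 'd', 'm', 'i', 'n', 0x80 };

    printf("bucket = %u\n", djb2_xor_utf16(user));
    return 0;
}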
Stmt.h
//===- Stmt.h - Classes for representing statements -------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Stmt interface and subclasses. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMT_H #define LLVM_CLANG_AST_STMT_H #include "clang/AST/DeclGroup.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/CapturedStmt.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/iterator.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include <algorithm> #include <cassert> #include <cstddef> #include <iterator> #include <string> namespace llvm { class FoldingSetNodeID; } // namespace llvm namespace clang { class ASTContext; class Attr; class CapturedDecl; class Decl; class Expr; class LabelDecl; class ODRHash; class PrinterHelper; struct PrintingPolicy; class RecordDecl; class SourceManager; class StringLiteral; class Token; class VarDecl; //===----------------------------------------------------------------------===// // AST classes for statements. //===----------------------------------------------------------------------===// /// Stmt - This represents one statement. /// class alignas(void *) Stmt { public: enum StmtClass { NoStmtClass = 0, #define STMT(CLASS, PARENT) CLASS##Class, #define STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class, #define LAST_STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class #define ABSTRACT_STMT(STMT) #include "clang/AST/StmtNodes.inc" }; // Make vanilla 'new' and 'delete' illegal for Stmts. protected: friend class ASTStmtReader; friend class ASTStmtWriter; void *operator new(size_t bytes) noexcept { llvm_unreachable("Stmts cannot be allocated with regular 'new'."); } void operator delete(void *data) noexcept { llvm_unreachable("Stmts cannot be released with regular 'delete'."); } //===--- Statement bitfields classes ---===// class StmtBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class Stmt; /// The statement class. unsigned sClass : 8; /// This bit is set only for the Stmts that are the structured-block of /// OpenMP executable directives. Directives that have a structured block /// are called "non-standalone" directives. /// I.e. those returned by OMPExecutableDirective::getStructuredBlock(). unsigned IsOMPStructuredBlock : 1; }; enum { NumStmtBits = 9 }; class NullStmtBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class NullStmt; unsigned : NumStmtBits; /// True if the null statement was preceded by an empty macro, e.g: /// @code /// #define CALL(x) /// CALL(0); /// @endcode unsigned HasLeadingEmptyMacro : 1; /// The location of the semi-colon. SourceLocation SemiLoc; }; class CompoundStmtBitfields { friend class ASTStmtReader; friend class CompoundStmt; unsigned : NumStmtBits; unsigned NumStmts : 32 - NumStmtBits; /// The location of the opening "{". 
SourceLocation LBraceLoc; }; class LabelStmtBitfields { friend class LabelStmt; unsigned : NumStmtBits; SourceLocation IdentLoc; }; class AttributedStmtBitfields { friend class ASTStmtReader; friend class AttributedStmt; unsigned : NumStmtBits; /// Number of attributes. unsigned NumAttrs : 32 - NumStmtBits; /// The location of the attribute. SourceLocation AttrLoc; }; class IfStmtBitfields { friend class ASTStmtReader; friend class IfStmt; unsigned : NumStmtBits; /// True if this if statement is a constexpr if. unsigned IsConstexpr : 1; /// True if this if statement has storage for an else statement. unsigned HasElse : 1; /// True if this if statement has storage for a variable declaration. unsigned HasVar : 1; /// True if this if statement has storage for an init statement. unsigned HasInit : 1; /// The location of the "if". SourceLocation IfLoc; }; class SwitchStmtBitfields { friend class SwitchStmt; unsigned : NumStmtBits; /// True if the SwitchStmt has storage for an init statement. unsigned HasInit : 1; /// True if the SwitchStmt has storage for a condition variable. unsigned HasVar : 1; /// If the SwitchStmt is a switch on an enum value, records whether all /// the enum values were covered by CaseStmts. The coverage information /// value is meant to be a hint for possible clients. unsigned AllEnumCasesCovered : 1; /// The location of the "switch". SourceLocation SwitchLoc; }; class WhileStmtBitfields { friend class ASTStmtReader; friend class WhileStmt; unsigned : NumStmtBits; /// True if the WhileStmt has storage for a condition variable. unsigned HasVar : 1; /// The location of the "while". SourceLocation WhileLoc; }; class DoStmtBitfields { friend class DoStmt; unsigned : NumStmtBits; /// The location of the "do". SourceLocation DoLoc; }; class ForStmtBitfields { friend class ForStmt; unsigned : NumStmtBits; /// The location of the "for". SourceLocation ForLoc; }; class GotoStmtBitfields { friend class GotoStmt; friend class IndirectGotoStmt; unsigned : NumStmtBits; /// The location of the "goto". SourceLocation GotoLoc; }; class ContinueStmtBitfields { friend class ContinueStmt; unsigned : NumStmtBits; /// The location of the "continue". SourceLocation ContinueLoc; }; class BreakStmtBitfields { friend class BreakStmt; unsigned : NumStmtBits; /// The location of the "break". SourceLocation BreakLoc; }; class ReturnStmtBitfields { friend class ReturnStmt; unsigned : NumStmtBits; /// True if this ReturnStmt has storage for an NRVO candidate. unsigned HasNRVOCandidate : 1; /// The location of the "return". SourceLocation RetLoc; }; class SwitchCaseBitfields { friend class SwitchCase; friend class CaseStmt; unsigned : NumStmtBits; /// Used by CaseStmt to store whether it is a case statement /// of the form case LHS ... RHS (a GNU extension). unsigned CaseStmtIsGNURange : 1; /// The location of the "case" or "default" keyword. 
SourceLocation KeywordLoc; }; //===--- Expression bitfields classes ---===// class ExprBitfields { friend class ASTStmtReader; // deserialization friend class AtomicExpr; // ctor friend class BlockDeclRefExpr; // ctor friend class CallExpr; // ctor friend class CXXConstructExpr; // ctor friend class CXXDependentScopeMemberExpr; // ctor friend class CXXNewExpr; // ctor friend class CXXUnresolvedConstructExpr; // ctor friend class DeclRefExpr; // computeDependence friend class DependentScopeDeclRefExpr; // ctor friend class DesignatedInitExpr; // ctor friend class Expr; friend class InitListExpr; // ctor friend class ObjCArrayLiteral; // ctor friend class ObjCDictionaryLiteral; // ctor friend class ObjCMessageExpr; // ctor friend class OffsetOfExpr; // ctor friend class OpaqueValueExpr; // ctor friend class OverloadExpr; // ctor friend class ParenListExpr; // ctor friend class PseudoObjectExpr; // ctor friend class ShuffleVectorExpr; // ctor unsigned : NumStmtBits; unsigned ValueKind : 2; unsigned ObjectKind : 3; unsigned TypeDependent : 1; unsigned ValueDependent : 1; unsigned InstantiationDependent : 1; unsigned ContainsUnexpandedParameterPack : 1; }; enum { NumExprBits = NumStmtBits + 9 }; class PredefinedExprBitfields { friend class ASTStmtReader; friend class PredefinedExpr; unsigned : NumExprBits; /// The kind of this PredefinedExpr. One of the enumeration values /// in PredefinedExpr::IdentKind. unsigned Kind : 4; /// True if this PredefinedExpr has a trailing "StringLiteral *" /// for the predefined identifier. unsigned HasFunctionName : 1; /// The location of this PredefinedExpr. SourceLocation Loc; }; class DeclRefExprBitfields { friend class ASTStmtReader; // deserialization friend class DeclRefExpr; unsigned : NumExprBits; unsigned HasQualifier : 1; unsigned HasTemplateKWAndArgsInfo : 1; unsigned HasFoundDecl : 1; unsigned HadMultipleCandidates : 1; unsigned RefersToEnclosingVariableOrCapture : 1; /// The location of the declaration name itself. SourceLocation Loc; }; enum APFloatSemantics { IEEEhalf, IEEEsingle, IEEEdouble, x87DoubleExtended, IEEEquad, PPCDoubleDouble }; class FloatingLiteralBitfields { friend class FloatingLiteral; unsigned : NumExprBits; unsigned Semantics : 3; // Provides semantics for APFloat construction unsigned IsExact : 1; }; class StringLiteralBitfields { friend class ASTStmtReader; friend class StringLiteral; unsigned : NumExprBits; /// The kind of this string literal. /// One of the enumeration values of StringLiteral::StringKind. unsigned Kind : 3; /// The width of a single character in bytes. Only values of 1, 2, /// and 4 bytes are supported. StringLiteral::mapCharByteWidth maps /// the target + string kind to the appropriate CharByteWidth. unsigned CharByteWidth : 3; unsigned IsPascal : 1; /// The number of concatenated token this string is made of. /// This is the number of trailing SourceLocation. unsigned NumConcatenated; }; class CharacterLiteralBitfields { friend class CharacterLiteral; unsigned : NumExprBits; unsigned Kind : 3; }; class UnaryOperatorBitfields { friend class UnaryOperator; unsigned : NumExprBits; unsigned Opc : 5; unsigned CanOverflow : 1; SourceLocation Loc; }; class UnaryExprOrTypeTraitExprBitfields { friend class UnaryExprOrTypeTraitExpr; unsigned : NumExprBits; unsigned Kind : 3; unsigned IsType : 1; // true if operand is a type, false if an expression. 
}; class ArraySubscriptExprBitfields { friend class ArraySubscriptExpr; unsigned : NumExprBits; SourceLocation RBracketLoc; }; class CallExprBitfields { friend class CallExpr; unsigned : NumExprBits; unsigned NumPreArgs : 1; /// True if the callee of the call expression was found using ADL. unsigned UsesADL : 1; /// Padding used to align OffsetToTrailingObjects to a byte multiple. unsigned : 24 - 2 - NumExprBits; /// The offset in bytes from the this pointer to the start of the /// trailing objects belonging to CallExpr. Intentionally byte sized /// for faster access. unsigned OffsetToTrailingObjects : 8; }; enum { NumCallExprBits = 32 }; class MemberExprBitfields { friend class MemberExpr; unsigned : NumExprBits; /// IsArrow - True if this is "X->F", false if this is "X.F". unsigned IsArrow : 1; /// True if this member expression used a nested-name-specifier to /// refer to the member, e.g., "x->Base::f", or found its member via /// a using declaration. When true, a MemberExprNameQualifier /// structure is allocated immediately after the MemberExpr. unsigned HasQualifierOrFoundDecl : 1; /// True if this member expression specified a template keyword /// and/or a template argument list explicitly, e.g., x->f<int>, /// x->template f, x->template f<int>. /// When true, an ASTTemplateKWAndArgsInfo structure and its /// TemplateArguments (if any) are present. unsigned HasTemplateKWAndArgsInfo : 1; /// True if this member expression refers to a method that /// was resolved from an overloaded set having size greater than 1. unsigned HadMultipleCandidates : 1; /// This is the location of the -> or . in the expression. SourceLocation OperatorLoc; }; class CastExprBitfields { friend class CastExpr; friend class ImplicitCastExpr; unsigned : NumExprBits; unsigned Kind : 6; unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr. /// The number of CXXBaseSpecifiers in the cast. 14 bits would be enough /// here. ([implimits] Direct and indirect base classes [16384]). unsigned BasePathSize; }; class BinaryOperatorBitfields { friend class BinaryOperator; unsigned : NumExprBits; unsigned Opc : 6; /// This is only meaningful for operations on floating point /// types and 0 otherwise. unsigned FPFeatures : 3; SourceLocation OpLoc; }; class InitListExprBitfields { friend class InitListExpr; unsigned : NumExprBits; /// Whether this initializer list originally had a GNU array-range /// designator in it. This is a temporary marker used by CodeGen. unsigned HadArrayRangeDesignator : 1; }; class ParenListExprBitfields { friend class ASTStmtReader; friend class ParenListExpr; unsigned : NumExprBits; /// The number of expressions in the paren list. unsigned NumExprs; }; class GenericSelectionExprBitfields { friend class ASTStmtReader; friend class GenericSelectionExpr; unsigned : NumExprBits; /// The location of the "_Generic". SourceLocation GenericLoc; }; class PseudoObjectExprBitfields { friend class ASTStmtReader; // deserialization friend class PseudoObjectExpr; unsigned : NumExprBits; // These don't need to be particularly wide, because they're // strictly limited by the forms of expressions we permit. unsigned NumSubExprs : 8; unsigned ResultIndex : 32 - 8 - NumExprBits; }; class SourceLocExprBitfields { friend class ASTStmtReader; friend class SourceLocExpr; unsigned : NumExprBits; /// The kind of source location builtin represented by the SourceLocExpr. /// Ex. __builtin_LINE, __builtin_FUNCTION, ect. 
unsigned Kind : 2; }; //===--- C++ Expression bitfields classes ---===// class CXXOperatorCallExprBitfields { friend class ASTStmtReader; friend class CXXOperatorCallExpr; unsigned : NumCallExprBits; /// The kind of this overloaded operator. One of the enumerator /// value of OverloadedOperatorKind. unsigned OperatorKind : 6; // Only meaningful for floating point types. unsigned FPFeatures : 3; }; class CXXBoolLiteralExprBitfields { friend class CXXBoolLiteralExpr; unsigned : NumExprBits; /// The value of the boolean literal. unsigned Value : 1; /// The location of the boolean literal. SourceLocation Loc; }; class CXXNullPtrLiteralExprBitfields { friend class CXXNullPtrLiteralExpr; unsigned : NumExprBits; /// The location of the null pointer literal. SourceLocation Loc; }; class CXXThisExprBitfields { friend class CXXThisExpr; unsigned : NumExprBits; /// Whether this is an implicit "this". unsigned IsImplicit : 1; /// The location of the "this". SourceLocation Loc; }; class CXXThrowExprBitfields { friend class ASTStmtReader; friend class CXXThrowExpr; unsigned : NumExprBits; /// Whether the thrown variable (if any) is in scope. unsigned IsThrownVariableInScope : 1; /// The location of the "throw". SourceLocation ThrowLoc; }; class CXXDefaultArgExprBitfields { friend class ASTStmtReader; friend class CXXDefaultArgExpr; unsigned : NumExprBits; /// The location where the default argument expression was used. SourceLocation Loc; }; class CXXDefaultInitExprBitfields { friend class ASTStmtReader; friend class CXXDefaultInitExpr; unsigned : NumExprBits; /// The location where the default initializer expression was used. SourceLocation Loc; }; class CXXScalarValueInitExprBitfields { friend class ASTStmtReader; friend class CXXScalarValueInitExpr; unsigned : NumExprBits; SourceLocation RParenLoc; }; class CXXNewExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class CXXNewExpr; unsigned : NumExprBits; /// Was the usage ::new, i.e. is the global new to be used? unsigned IsGlobalNew : 1; /// Do we allocate an array? If so, the first trailing "Stmt *" is the /// size expression. unsigned IsArray : 1; /// Should the alignment be passed to the allocation function? unsigned ShouldPassAlignment : 1; /// If this is an array allocation, does the usual deallocation /// function for the allocated type want to know the allocated size? unsigned UsualArrayDeleteWantsSize : 1; /// What kind of initializer do we have? Could be none, parens, or braces. /// In storage, we distinguish between "none, and no initializer expr", and /// "none, but an implicit initializer expr". unsigned StoredInitializationStyle : 2; /// True if the allocated type was expressed as a parenthesized type-id. unsigned IsParenTypeId : 1; /// The number of placement new arguments. unsigned NumPlacementArgs; }; class CXXDeleteExprBitfields { friend class ASTStmtReader; friend class CXXDeleteExpr; unsigned : NumExprBits; /// Is this a forced global delete, i.e. "::delete"? unsigned GlobalDelete : 1; /// Is this the array form of delete, i.e. "delete[]"? unsigned ArrayForm : 1; /// ArrayFormAsWritten can be different from ArrayForm if 'delete' is /// applied to pointer-to-array type (ArrayFormAsWritten will be false /// while ArrayForm will be true). unsigned ArrayFormAsWritten : 1; /// Does the usual deallocation function for the element type require /// a size_t argument? unsigned UsualArrayDeleteWantsSize : 1; /// Location of the expression. 
SourceLocation Loc; }; class TypeTraitExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class TypeTraitExpr; unsigned : NumExprBits; /// The kind of type trait, which is a value of a TypeTrait enumerator. unsigned Kind : 8; /// If this expression is not value-dependent, this indicates whether /// the trait evaluated true or false. unsigned Value : 1; /// The number of arguments to this type trait. unsigned NumArgs : 32 - 8 - 1 - NumExprBits; }; class DependentScopeDeclRefExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class DependentScopeDeclRefExpr; unsigned : NumExprBits; /// Whether the name includes info for explicit template /// keyword and arguments. unsigned HasTemplateKWAndArgsInfo : 1; }; class CXXConstructExprBitfields { friend class ASTStmtReader; friend class CXXConstructExpr; unsigned : NumExprBits; unsigned Elidable : 1; unsigned HadMultipleCandidates : 1; unsigned ListInitialization : 1; unsigned StdInitListInitialization : 1; unsigned ZeroInitialization : 1; unsigned ConstructionKind : 3; SourceLocation Loc; }; class ExprWithCleanupsBitfields { friend class ASTStmtReader; // deserialization friend class ExprWithCleanups; unsigned : NumExprBits; // When false, it must not have side effects. unsigned CleanupsHaveSideEffects : 1; unsigned NumObjects : 32 - 1 - NumExprBits; }; class CXXUnresolvedConstructExprBitfields { friend class ASTStmtReader; friend class CXXUnresolvedConstructExpr; unsigned : NumExprBits; /// The number of arguments used to construct the type. unsigned NumArgs; }; class CXXDependentScopeMemberExprBitfields { friend class ASTStmtReader; friend class CXXDependentScopeMemberExpr; unsigned : NumExprBits; /// Whether this member expression used the '->' operator or /// the '.' operator. unsigned IsArrow : 1; /// Whether this member expression has info for explicit template /// keyword and arguments. unsigned HasTemplateKWAndArgsInfo : 1; /// See getFirstQualifierFoundInScope() and the comment listing /// the trailing objects. unsigned HasFirstQualifierFoundInScope : 1; /// The location of the '->' or '.' operator. SourceLocation OperatorLoc; }; class OverloadExprBitfields { friend class ASTStmtReader; friend class OverloadExpr; unsigned : NumExprBits; /// Whether the name includes info for explicit template /// keyword and arguments. unsigned HasTemplateKWAndArgsInfo : 1; /// Padding used by the derived classes to store various bits. If you /// need to add some data here, shrink this padding and add your data /// above. NumOverloadExprBits also needs to be updated. unsigned : 32 - NumExprBits - 1; /// The number of results. unsigned NumResults; }; enum { NumOverloadExprBits = NumExprBits + 1 }; class UnresolvedLookupExprBitfields { friend class ASTStmtReader; friend class UnresolvedLookupExpr; unsigned : NumOverloadExprBits; /// True if these lookup results should be extended by /// argument-dependent lookup if this is the operand of a function call. unsigned RequiresADL : 1; /// True if these lookup results are overloaded. This is pretty trivially /// rederivable if we urgently need to kill this field. unsigned Overloaded : 1; }; static_assert(sizeof(UnresolvedLookupExprBitfields) <= 4, "UnresolvedLookupExprBitfields must be <= than 4 bytes to" "avoid trashing OverloadExprBitfields::NumResults!"); class UnresolvedMemberExprBitfields { friend class ASTStmtReader; friend class UnresolvedMemberExpr; unsigned : NumOverloadExprBits; /// Whether this member expression used the '->' operator or /// the '.' 
operator. unsigned IsArrow : 1; /// Whether the lookup results contain an unresolved using declaration. unsigned HasUnresolvedUsing : 1; }; static_assert(sizeof(UnresolvedMemberExprBitfields) <= 4, "UnresolvedMemberExprBitfields must be <= than 4 bytes to" "avoid trashing OverloadExprBitfields::NumResults!"); class CXXNoexceptExprBitfields { friend class ASTStmtReader; friend class CXXNoexceptExpr; unsigned : NumExprBits; unsigned Value : 1; }; class SubstNonTypeTemplateParmExprBitfields { friend class ASTStmtReader; friend class SubstNonTypeTemplateParmExpr; unsigned : NumExprBits; /// The location of the non-type template parameter reference. SourceLocation NameLoc; }; //===--- C++ Coroutines TS bitfields classes ---===// class CoawaitExprBitfields { friend class CoawaitExpr; unsigned : NumExprBits; unsigned IsImplicit : 1; }; //===--- Obj-C Expression bitfields classes ---===// class ObjCIndirectCopyRestoreExprBitfields { friend class ObjCIndirectCopyRestoreExpr; unsigned : NumExprBits; unsigned ShouldCopy : 1; }; //===--- Clang Extensions bitfields classes ---===// class OpaqueValueExprBitfields { friend class ASTStmtReader; friend class OpaqueValueExpr; unsigned : NumExprBits; /// The OVE is a unique semantic reference to its source expression if this /// bit is set to true. unsigned IsUnique : 1; SourceLocation Loc; }; union { // Same order as in StmtNodes.td. // Statements StmtBitfields StmtBits; NullStmtBitfields NullStmtBits; CompoundStmtBitfields CompoundStmtBits; LabelStmtBitfields LabelStmtBits; AttributedStmtBitfields AttributedStmtBits; IfStmtBitfields IfStmtBits; SwitchStmtBitfields SwitchStmtBits; WhileStmtBitfields WhileStmtBits; DoStmtBitfields DoStmtBits; ForStmtBitfields ForStmtBits; GotoStmtBitfields GotoStmtBits; ContinueStmtBitfields ContinueStmtBits; BreakStmtBitfields BreakStmtBits; ReturnStmtBitfields ReturnStmtBits; SwitchCaseBitfields SwitchCaseBits; // Expressions ExprBitfields ExprBits; PredefinedExprBitfields PredefinedExprBits; DeclRefExprBitfields DeclRefExprBits; FloatingLiteralBitfields FloatingLiteralBits; StringLiteralBitfields StringLiteralBits; CharacterLiteralBitfields CharacterLiteralBits; UnaryOperatorBitfields UnaryOperatorBits; UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits; ArraySubscriptExprBitfields ArraySubscriptExprBits; CallExprBitfields CallExprBits; MemberExprBitfields MemberExprBits; CastExprBitfields CastExprBits; BinaryOperatorBitfields BinaryOperatorBits; InitListExprBitfields InitListExprBits; ParenListExprBitfields ParenListExprBits; GenericSelectionExprBitfields GenericSelectionExprBits; PseudoObjectExprBitfields PseudoObjectExprBits; SourceLocExprBitfields SourceLocExprBits; // C++ Expressions CXXOperatorCallExprBitfields CXXOperatorCallExprBits; CXXBoolLiteralExprBitfields CXXBoolLiteralExprBits; CXXNullPtrLiteralExprBitfields CXXNullPtrLiteralExprBits; CXXThisExprBitfields CXXThisExprBits; CXXThrowExprBitfields CXXThrowExprBits; CXXDefaultArgExprBitfields CXXDefaultArgExprBits; CXXDefaultInitExprBitfields CXXDefaultInitExprBits; CXXScalarValueInitExprBitfields CXXScalarValueInitExprBits; CXXNewExprBitfields CXXNewExprBits; CXXDeleteExprBitfields CXXDeleteExprBits; TypeTraitExprBitfields TypeTraitExprBits; DependentScopeDeclRefExprBitfields DependentScopeDeclRefExprBits; CXXConstructExprBitfields CXXConstructExprBits; ExprWithCleanupsBitfields ExprWithCleanupsBits; CXXUnresolvedConstructExprBitfields CXXUnresolvedConstructExprBits; CXXDependentScopeMemberExprBitfields CXXDependentScopeMemberExprBits; 
OverloadExprBitfields OverloadExprBits; UnresolvedLookupExprBitfields UnresolvedLookupExprBits; UnresolvedMemberExprBitfields UnresolvedMemberExprBits; CXXNoexceptExprBitfields CXXNoexceptExprBits; SubstNonTypeTemplateParmExprBitfields SubstNonTypeTemplateParmExprBits; // C++ Coroutines TS expressions CoawaitExprBitfields CoawaitBits; // Obj-C Expressions ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits; // Clang Extensions OpaqueValueExprBitfields OpaqueValueExprBits; }; public: // Only allow allocation of Stmts using the allocator in ASTContext // or by doing a placement new. void* operator new(size_t bytes, const ASTContext& C, unsigned alignment = 8); void* operator new(size_t bytes, const ASTContext* C, unsigned alignment = 8) { return operator new(bytes, *C, alignment); } void *operator new(size_t bytes, void *mem) noexcept { return mem; } void operator delete(void *, const ASTContext &, unsigned) noexcept {} void operator delete(void *, const ASTContext *, unsigned) noexcept {} void operator delete(void *, size_t) noexcept {} void operator delete(void *, void *) noexcept {} public: /// A placeholder type used to construct an empty shell of a /// type, that will be filled in later (e.g., by some /// de-serialization). struct EmptyShell {}; protected: /// Iterator for iterating over Stmt * arrays that contain only T *. /// /// This is needed because AST nodes use Stmt* arrays to store /// references to children (to be compatible with StmtIterator). template<typename T, typename TPtr = T *, typename StmtPtr = Stmt *> struct CastIterator : llvm::iterator_adaptor_base<CastIterator<T, TPtr, StmtPtr>, StmtPtr *, std::random_access_iterator_tag, TPtr> { using Base = typename CastIterator::iterator_adaptor_base; CastIterator() : Base(nullptr) {} CastIterator(StmtPtr *I) : Base(I) {} typename Base::value_type operator*() const { return cast_or_null<T>(*this->I); } }; /// Const iterator for iterating over Stmt * arrays that contain only T *. template <typename T> using ConstCastIterator = CastIterator<T, const T *const, const Stmt *const>; using ExprIterator = CastIterator<Expr>; using ConstExprIterator = ConstCastIterator<Expr>; private: /// Whether statistic collection is enabled. static bool StatisticsEnabled; protected: /// Construct an empty statement. explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {} public: Stmt() = delete; Stmt(const Stmt &) = delete; Stmt(Stmt &&) = delete; Stmt &operator=(const Stmt &) = delete; Stmt &operator=(Stmt &&) = delete; Stmt(StmtClass SC) { static_assert(sizeof(*this) <= 8, "changing bitfields changed sizeof(Stmt)"); static_assert(sizeof(*this) % alignof(void *) == 0, "Insufficient alignment!"); StmtBits.sClass = SC; StmtBits.IsOMPStructuredBlock = false; if (StatisticsEnabled) Stmt::addStmtClass(SC); } StmtClass getStmtClass() const { return static_cast<StmtClass>(StmtBits.sClass); } const char *getStmtClassName() const; bool isOMPStructuredBlock() const { return StmtBits.IsOMPStructuredBlock; } void setIsOMPStructuredBlock(bool IsOMPStructuredBlock) { StmtBits.IsOMPStructuredBlock = IsOMPStructuredBlock; } /// SourceLocation tokens are not useful in isolation - they are low level /// value objects created/interpreted by SourceManager. We assume AST /// clients will have a pointer to the respective SourceManager. 
SourceRange getSourceRange() const LLVM_READONLY; SourceLocation getBeginLoc() const LLVM_READONLY; SourceLocation getEndLoc() const LLVM_READONLY; // global temp stats (until we have a per-module visitor) static void addStmtClass(const StmtClass s); static void EnableStatistics(); static void PrintStats(); /// Dumps the specified AST fragment and all subtrees to /// \c llvm::errs(). void dump() const; void dump(SourceManager &SM) const; void dump(raw_ostream &OS, SourceManager &SM) const; void dump(raw_ostream &OS) const; /// \return Unique reproducible object identifier int64_t getID(const ASTContext &Context) const; /// dumpColor - same as dump(), but forces color highlighting. void dumpColor() const; /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST /// back to its original source language syntax. void dumpPretty(const ASTContext &Context) const; void printPretty(raw_ostream &OS, PrinterHelper *Helper, const PrintingPolicy &Policy, unsigned Indentation = 0, StringRef NewlineSymbol = "\n", const ASTContext *Context = nullptr) const; /// Pretty-prints in JSON format. void printJson(raw_ostream &Out, PrinterHelper *Helper, const PrintingPolicy &Policy, bool AddQuotes) const; /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only /// works on systems with GraphViz (Mac OS X) or dot+gv installed. void viewAST() const; /// Skip no-op (attributed, compound) container stmts and skip captured /// stmt at the top, if \a IgnoreCaptured is true. Stmt *IgnoreContainers(bool IgnoreCaptured = false); const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const { return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured); } const Stmt *stripLabelLikeStatements() const; Stmt *stripLabelLikeStatements() { return const_cast<Stmt*>( const_cast<const Stmt*>(this)->stripLabelLikeStatements()); } /// Child Iterators: All subclasses must implement 'children' /// to permit easy iteration over the substatements/subexpessions of an /// AST node. This permits easy iteration over all nodes in the AST. using child_iterator = StmtIterator; using const_child_iterator = ConstStmtIterator; using child_range = llvm::iterator_range<child_iterator>; using const_child_range = llvm::iterator_range<const_child_iterator>; child_range children(); const_child_range children() const { auto Children = const_cast<Stmt *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_iterator child_begin() { return children().begin(); } child_iterator child_end() { return children().end(); } const_child_iterator child_begin() const { return children().begin(); } const_child_iterator child_end() const { return children().end(); } /// Produce a unique representation of the given statement. /// /// \param ID once the profiling operation is complete, will contain /// the unique representation of the given statement. /// /// \param Context the AST context in which the statement resides /// /// \param Canonical whether the profile should be based on the canonical /// representation of this statement (e.g., where non-type template /// parameters are identified by index/level rather than their /// declaration pointers) or the exact representation of the statement as /// written in the source. void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, bool Canonical) const; /// Calculate a unique representation for a statement that is /// stable across compiler invocations. /// /// \param ID profile information will be stored in ID. 
/// /// \param Hash an ODRHash object which will be called where pointers would /// have been used in the Profile function. void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const; }; /// DeclStmt - Adaptor class for mixing declarations with statements and /// expressions. For example, CompoundStmt mixes statements, expressions /// and declarations (variables, types). Another example is ForStmt, where /// the first statement can be an expression or a declaration. class DeclStmt : public Stmt { DeclGroupRef DG; SourceLocation StartLoc, EndLoc; public: DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc) : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {} /// Build an empty declaration statement. explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {} /// isSingleDecl - This method returns true if this DeclStmt refers /// to a single Decl. bool isSingleDecl() const { return DG.isSingleDecl(); } const Decl *getSingleDecl() const { return DG.getSingleDecl(); } Decl *getSingleDecl() { return DG.getSingleDecl(); } const DeclGroupRef getDeclGroup() const { return DG; } DeclGroupRef getDeclGroup() { return DG; } void setDeclGroup(DeclGroupRef DGR) { DG = DGR; } void setStartLoc(SourceLocation L) { StartLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } SourceLocation getBeginLoc() const LLVM_READONLY { return StartLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == DeclStmtClass; } // Iterators over subexpressions. child_range children() { return child_range(child_iterator(DG.begin(), DG.end()), child_iterator(DG.end(), DG.end())); } const_child_range children() const { auto Children = const_cast<DeclStmt *>(this)->children(); return const_child_range(Children); } using decl_iterator = DeclGroupRef::iterator; using const_decl_iterator = DeclGroupRef::const_iterator; using decl_range = llvm::iterator_range<decl_iterator>; using decl_const_range = llvm::iterator_range<const_decl_iterator>; decl_range decls() { return decl_range(decl_begin(), decl_end()); } decl_const_range decls() const { return decl_const_range(decl_begin(), decl_end()); } decl_iterator decl_begin() { return DG.begin(); } decl_iterator decl_end() { return DG.end(); } const_decl_iterator decl_begin() const { return DG.begin(); } const_decl_iterator decl_end() const { return DG.end(); } using reverse_decl_iterator = std::reverse_iterator<decl_iterator>; reverse_decl_iterator decl_rbegin() { return reverse_decl_iterator(decl_end()); } reverse_decl_iterator decl_rend() { return reverse_decl_iterator(decl_begin()); } }; /// NullStmt - This is the null statement ";": C99 6.8.3p3. /// class NullStmt : public Stmt { public: NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false) : Stmt(NullStmtClass) { NullStmtBits.HasLeadingEmptyMacro = hasLeadingEmptyMacro; setSemiLoc(L); } /// Build an empty null statement. 
  explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {}

  SourceLocation getSemiLoc() const { return NullStmtBits.SemiLoc; }
  void setSemiLoc(SourceLocation L) { NullStmtBits.SemiLoc = L; }

  bool hasLeadingEmptyMacro() const {
    return NullStmtBits.HasLeadingEmptyMacro;
  }

  SourceLocation getBeginLoc() const { return getSemiLoc(); }
  SourceLocation getEndLoc() const { return getSemiLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == NullStmtClass;
  }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// CompoundStmt - This represents a group of statements like { stmt stmt }.
class CompoundStmt final
    : public Stmt,
      private llvm::TrailingObjects<CompoundStmt, Stmt *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  /// The location of the closing "}". LBraceLoc is stored in CompoundStmtBits.
  SourceLocation RBraceLoc;

  CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB);
  explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {}

  void setStmts(ArrayRef<Stmt *> Stmts);

public:
  static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts,
                              SourceLocation LB, SourceLocation RB);

  // Build an empty compound statement with a location.
  explicit CompoundStmt(SourceLocation Loc)
      : Stmt(CompoundStmtClass), RBraceLoc(Loc) {
    CompoundStmtBits.NumStmts = 0;
    CompoundStmtBits.LBraceLoc = Loc;
  }

  // Build an empty compound statement.
  static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);

  bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
  unsigned size() const { return CompoundStmtBits.NumStmts; }

  using body_iterator = Stmt **;
  using body_range = llvm::iterator_range<body_iterator>;

  body_range body() { return body_range(body_begin(), body_end()); }
  body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
  body_iterator body_end() { return body_begin() + size(); }
  Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }

  Stmt *body_back() {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  void setLastStmt(Stmt *S) {
    assert(!body_empty() && "setLastStmt");
    body_begin()[size() - 1] = S;
  }

  using const_body_iterator = Stmt *const *;
  using body_const_range = llvm::iterator_range<const_body_iterator>;

  body_const_range body() const {
    return body_const_range(body_begin(), body_end());
  }

  const_body_iterator body_begin() const {
    return getTrailingObjects<Stmt *>();
  }

  const_body_iterator body_end() const { return body_begin() + size(); }

  const Stmt *body_front() const {
    return !body_empty() ? body_begin()[0] : nullptr;
  }

  const Stmt *body_back() const {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  using reverse_body_iterator = std::reverse_iterator<body_iterator>;

  reverse_body_iterator body_rbegin() {
    return reverse_body_iterator(body_end());
  }

  reverse_body_iterator body_rend() {
    return reverse_body_iterator(body_begin());
  }

  using const_reverse_body_iterator =
      std::reverse_iterator<const_body_iterator>;

  const_reverse_body_iterator body_rbegin() const {
    return const_reverse_body_iterator(body_end());
  }

  const_reverse_body_iterator body_rend() const {
    return const_reverse_body_iterator(body_begin());
  }

  SourceLocation getBeginLoc() const { return CompoundStmtBits.LBraceLoc; }
  SourceLocation getEndLoc() const { return RBraceLoc; }

  SourceLocation getLBracLoc() const { return CompoundStmtBits.LBraceLoc; }
  SourceLocation getRBracLoc() const { return RBraceLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CompoundStmtClass;
  }

  // Iterators
  child_range children() { return child_range(body_begin(), body_end()); }

  const_child_range children() const {
    return const_child_range(body_begin(), body_end());
  }
};

// SwitchCase is the base class for CaseStmt and DefaultStmt.
class SwitchCase : public Stmt {
protected:
  /// The location of the ":".
  SourceLocation ColonLoc;

  // The location of the "case" or "default" keyword. Stored in SwitchCaseBits.
  // SourceLocation KeywordLoc;

  /// A pointer to the following CaseStmt or DefaultStmt class,
  /// used by SwitchStmt.
  SwitchCase *NextSwitchCase = nullptr;

  SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
      : Stmt(SC), ColonLoc(ColonLoc) {
    setKeywordLoc(KWLoc);
  }

  SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
  const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
  SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
  void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }

  SourceLocation getKeywordLoc() const { return SwitchCaseBits.KeywordLoc; }
  void setKeywordLoc(SourceLocation L) { SwitchCaseBits.KeywordLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  inline Stmt *getSubStmt();
  const Stmt *getSubStmt() const {
    return const_cast<SwitchCase *>(this)->getSubStmt();
  }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  inline SourceLocation getEndLoc() const LLVM_READONLY;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass ||
           T->getStmtClass() == DefaultStmtClass;
  }
};

/// CaseStmt - Represent a case statement. It can optionally be a GNU case
/// statement of the form LHS ... RHS representing a range of cases.
class CaseStmt final
    : public SwitchCase,
      private llvm::TrailingObjects<CaseStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // CaseStmt is followed by several trailing objects, some of which are
  // optional. Note that it would be more convenient to put the optional
  // trailing objects at the end but this would impact children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the LHS of the case statement. Always present.
  //
  // * A "Stmt *" for the RHS of the case statement. This is a GNU extension
  //   which allows ranges in case statements of the form LHS ... RHS.
  //   Present if and only if caseStmtIsGNURange() is true.
  //
  // * A "Stmt *" for the substatement of the case statement. Always present.
  //
  // * A SourceLocation for the location of the ... if this is a case statement
  //   with a range. Present if and only if caseStmtIsGNURange() is true.
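  //
  // For example (an illustrative sketch, not normative documentation), a GNU
  // range case such as
  //
  //   case 1 ... 4: body();
  //
  // stores the LHS expression "1", the RHS expression "4", the substatement
  // "body();", and the source location of the "...".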
  enum { LhsOffset = 0, SubStmtOffsetFromRhs = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + caseStmtIsGNURange();
  }

  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return caseStmtIsGNURange();
  }

  unsigned lhsOffset() const { return LhsOffset; }
  unsigned rhsOffset() const { return LhsOffset + caseStmtIsGNURange(); }
  unsigned subStmtOffset() const { return rhsOffset() + SubStmtOffsetFromRhs; }

  /// Build a case statement assuming that the storage for the
  /// trailing objects has been properly allocated.
  CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
           SourceLocation ellipsisLoc, SourceLocation colonLoc)
      : SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
    // Handle GNU case statements of the form LHS ... RHS.
    bool IsGNURange = rhs != nullptr;
    SwitchCaseBits.CaseStmtIsGNURange = IsGNURange;
    setLHS(lhs);
    setSubStmt(nullptr);
    if (IsGNURange) {
      setRHS(rhs);
      setEllipsisLoc(ellipsisLoc);
    }
  }

  /// Build an empty switch case statement.
  explicit CaseStmt(EmptyShell Empty, bool CaseStmtIsGNURange)
      : SwitchCase(CaseStmtClass, Empty) {
    SwitchCaseBits.CaseStmtIsGNURange = CaseStmtIsGNURange;
  }

public:
  /// Build a case statement.
  static CaseStmt *Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
                          SourceLocation caseLoc, SourceLocation ellipsisLoc,
                          SourceLocation colonLoc);

  /// Build an empty case statement.
  static CaseStmt *CreateEmpty(const ASTContext &Ctx, bool CaseStmtIsGNURange);

  /// True if this case statement is of the form case LHS ... RHS, which
  /// is a GNU extension. In this case the RHS can be obtained with getRHS()
  /// and the location of the ellipsis can be obtained with getEllipsisLoc().
  bool caseStmtIsGNURange() const { return SwitchCaseBits.CaseStmtIsGNURange; }

  SourceLocation getCaseLoc() const { return getKeywordLoc(); }
  void setCaseLoc(SourceLocation L) { setKeywordLoc(L); }

  /// Get the location of the ... in a case statement of the form LHS ... RHS.
  SourceLocation getEllipsisLoc() const {
    return caseStmtIsGNURange() ? *getTrailingObjects<SourceLocation>()
                                : SourceLocation();
  }

  /// Set the location of the ... in a case statement of the form LHS ... RHS.
  /// Assert that this case statement is of this form.
  void setEllipsisLoc(SourceLocation L) {
    assert(
        caseStmtIsGNURange() &&
        "setEllipsisLoc but this is not a case stmt of the form LHS ... RHS!");
    *getTrailingObjects<SourceLocation>() = L;
  }

  Expr *getLHS() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
  }

  const Expr *getLHS() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
  }

  void setLHS(Expr *Val) {
    getTrailingObjects<Stmt *>()[lhsOffset()] = reinterpret_cast<Stmt *>(Val);
  }

  Expr *getRHS() {
    return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
                                      getTrailingObjects<Stmt *>()[rhsOffset()])
                                : nullptr;
  }

  const Expr *getRHS() const {
    return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
                                      getTrailingObjects<Stmt *>()[rhsOffset()])
                                : nullptr;
  }

  void setRHS(Expr *Val) {
    assert(caseStmtIsGNURange() &&
           "setRHS but this is not a case stmt of the form LHS ... RHS!");
    getTrailingObjects<Stmt *>()[rhsOffset()] = reinterpret_cast<Stmt *>(Val);
  }

  Stmt *getSubStmt() { return getTrailingObjects<Stmt *>()[subStmtOffset()]; }
  const Stmt *getSubStmt() const {
    return getTrailingObjects<Stmt *>()[subStmtOffset()];
  }

  void setSubStmt(Stmt *S) {
    getTrailingObjects<Stmt *>()[subStmtOffset()] = S;
  }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    // Handle deeply nested case statements with iteration instead of
    // recursion.
    const CaseStmt *CS = this;
    while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
      CS = CS2;

    return CS->getSubStmt()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
};

class DefaultStmt : public SwitchCase {
  Stmt *SubStmt;

public:
  DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt)
      : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}

  /// Build an empty default statement.
  explicit DefaultStmt(EmptyShell Empty)
      : SwitchCase(DefaultStmtClass, Empty) {}

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *S) { SubStmt = S; }

  SourceLocation getDefaultLoc() const { return getKeywordLoc(); }
  void setDefaultLoc(SourceLocation L) { setKeywordLoc(L); }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return SubStmt->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DefaultStmtClass;
  }

  // Iterators
  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }
};

SourceLocation SwitchCase::getEndLoc() const {
  if (const auto *CS = dyn_cast<CaseStmt>(this))
    return CS->getEndLoc();
  else if (const auto *DS = dyn_cast<DefaultStmt>(this))
    return DS->getEndLoc();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}

Stmt *SwitchCase::getSubStmt() {
  if (auto *CS = dyn_cast<CaseStmt>(this))
    return CS->getSubStmt();
  else if (auto *DS = dyn_cast<DefaultStmt>(this))
    return DS->getSubStmt();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}

/// Represents a statement that could possibly have a value and type. This
/// covers expression-statements, as well as labels and attributed statements.
///
/// Value statements have a special meaning when they are the last non-null
/// statement in a GNU statement expression, where they determine the value
/// of the statement expression.
class ValueStmt : public Stmt {
protected:
  using Stmt::Stmt;

public:
  const Expr *getExprStmt() const;
  Expr *getExprStmt() {
    const ValueStmt *ConstThis = this;
    return const_cast<Expr *>(ConstThis->getExprStmt());
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() >= firstValueStmtConstant &&
           T->getStmtClass() <= lastValueStmtConstant;
  }
};

/// LabelStmt - Represents a label, which has a substatement. For example:
///    foo: return;
class LabelStmt : public ValueStmt {
  LabelDecl *TheDecl;
  Stmt *SubStmt;

public:
  /// Build a label statement.
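  /// \p IL is the location of the label identifier, e.g. of "foo" in the
  /// example above.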
  LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
      : ValueStmt(LabelStmtClass), TheDecl(D), SubStmt(substmt) {
    setIdentLoc(IL);
  }

  /// Build an empty label statement.
  explicit LabelStmt(EmptyShell Empty) : ValueStmt(LabelStmtClass, Empty) {}

  SourceLocation getIdentLoc() const { return LabelStmtBits.IdentLoc; }
  void setIdentLoc(SourceLocation L) { LabelStmtBits.IdentLoc = L; }

  LabelDecl *getDecl() const { return TheDecl; }
  void setDecl(LabelDecl *D) { TheDecl = D; }

  const char *getName() const;
  Stmt *getSubStmt() { return SubStmt; }

  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *SS) { SubStmt = SS; }

  SourceLocation getBeginLoc() const { return getIdentLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return SubStmt->getEndLoc();
  }

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == LabelStmtClass;
  }
};

/// Represents an attribute applied to a statement. For example:
///   [[omp::for(...)]] for (...) { ... }
class AttributedStmt final
    : public ValueStmt,
      private llvm::TrailingObjects<AttributedStmt, const Attr *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  Stmt *SubStmt;

  AttributedStmt(SourceLocation Loc, ArrayRef<const Attr *> Attrs,
                 Stmt *SubStmt)
      : ValueStmt(AttributedStmtClass), SubStmt(SubStmt) {
    AttributedStmtBits.NumAttrs = Attrs.size();
    AttributedStmtBits.AttrLoc = Loc;
    std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
  }

  explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
      : ValueStmt(AttributedStmtClass, Empty) {
    AttributedStmtBits.NumAttrs = NumAttrs;
    AttributedStmtBits.AttrLoc = SourceLocation{};
    std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
  }

  const Attr *const *getAttrArrayPtr() const {
    return getTrailingObjects<const Attr *>();
  }
  const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); }

public:
  static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
                                ArrayRef<const Attr *> Attrs, Stmt *SubStmt);

  // Build an empty attributed statement.
  static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);

  SourceLocation getAttrLoc() const { return AttributedStmtBits.AttrLoc; }
  ArrayRef<const Attr *> getAttrs() const {
    return llvm::makeArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs);
  }

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }

  SourceLocation getBeginLoc() const { return getAttrLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return SubStmt->getEndLoc();
  }

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == AttributedStmtClass;
  }
};

/// IfStmt - This represents an if/then/else.
class IfStmt final
    : public Stmt,
      private llvm::TrailingObjects<IfStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // IfStmt is followed by several trailing objects, some of which are
  // optional. Note that it would be more convenient to put the optional
  // trailing objects at the end but this would change the order of the
  // children. The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the then statement.
  //   Always present.
  //
  // * A "Stmt *" for the else statement.
  //   Present if and only if hasElseStorage().
  //
  // * A "SourceLocation" for the location of the "else".
  //   Present if and only if hasElseStorage().
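  //
  // For example (an illustrative sketch, not normative documentation), in
  //
  //   if (int X = f()) { g(X); } else { h(); }
  //
  // "X" occupies the condition variable slot, while a C++17 init statement
  // such as the one in "if (auto V = f(); cond)" would occupy the init slot.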
  enum { InitOffset = 0, ThenOffsetFromCond = 1, ElseOffsetFromCond = 2 };
  enum { NumMandatoryStmtPtr = 2 };

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasElseStorage() + hasVarStorage() +
           hasInitStorage();
  }

  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return hasElseStorage();
  }

  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }

  unsigned thenOffset() const { return condOffset() + ThenOffsetFromCond; }
  unsigned elseOffset() const { return condOffset() + ElseOffsetFromCond; }

  /// Build an if/then/else statement.
  IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr,
         Stmt *Init, VarDecl *Var, Expr *Cond, Stmt *Then, SourceLocation EL,
         Stmt *Else);

  /// Build an empty if/then/else statement.
  explicit IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit);

public:
  /// Create an IfStmt.
  static IfStmt *Create(const ASTContext &Ctx, SourceLocation IL,
                        bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond,
                        Stmt *Then, SourceLocation EL = SourceLocation(),
                        Stmt *Else = nullptr);

  /// Create an empty IfStmt optionally with storage for an else statement,
  /// condition variable and init expression.
  static IfStmt *CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar,
                             bool HasInit);

  /// True if this IfStmt has the storage for an init statement.
  bool hasInitStorage() const { return IfStmtBits.HasInit; }

  /// True if this IfStmt has storage for a variable declaration.
  bool hasVarStorage() const { return IfStmtBits.HasVar; }

  /// True if this IfStmt has storage for an else statement.
  bool hasElseStorage() const { return IfStmtBits.HasElse; }

  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getThen() { return getTrailingObjects<Stmt *>()[thenOffset()]; }
  const Stmt *getThen() const {
    return getTrailingObjects<Stmt *>()[thenOffset()];
  }

  void setThen(Stmt *Then) {
    getTrailingObjects<Stmt *>()[thenOffset()] = Then;
  }

  Stmt *getElse() {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  const Stmt *getElse() const {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  void setElse(Stmt *Else) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    getTrailingObjects<Stmt *>()[elseOffset()] = Else;
  }

  /// Retrieve the variable declared in this "if" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// if (int x = foo()) {
  ///   printf("x is %d", x);
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<IfStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable for this if statement.
  /// The if statement must have storage for the condition variable.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this IfStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This if statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  SourceLocation getIfLoc() const { return IfStmtBits.IfLoc; }
  void setIfLoc(SourceLocation IfLoc) { IfStmtBits.IfLoc = IfLoc; }

  SourceLocation getElseLoc() const {
    return hasElseStorage() ? *getTrailingObjects<SourceLocation>()
                            : SourceLocation();
  }

  void setElseLoc(SourceLocation ElseLoc) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    *getTrailingObjects<SourceLocation>() = ElseLoc;
  }

  bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
  void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }

  bool isObjCAvailabilityCheck() const;

  SourceLocation getBeginLoc() const { return getIfLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    if (getElse())
      return getElse()->getEndLoc();
    return getThen()->getEndLoc();
  }

  // Iterators over subexpressions. The iterators will include iterating
  // over the initialization expression referenced by the condition variable.
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IfStmtClass;
  }
};

/// SwitchStmt - This represents a 'switch' stmt.
class SwitchStmt final : public Stmt,
                         private llvm::TrailingObjects<SwitchStmt, Stmt *> {
  friend TrailingObjects;

  /// Points to a linked list of case and default statements.
  SwitchCase *FirstCase;

  // SwitchStmt is followed by several trailing objects, some of which are
  // optional. Note that it would be more convenient to put the optional
  // trailing objects at the end but this would change the order in children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
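  //
  // For example (an illustrative sketch, not normative documentation), in
  //
  //   switch (int X = f()) { case 0: break; }
  //
  // "X" occupies the condition variable slot, while a C++17 init statement
  // such as the one in "switch (auto V = f(); V.kind())" would occupy the
  // init slot.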
  enum { InitOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasInitStorage() + hasVarStorage();
  }

  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }

  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  /// Build a switch statement.
  SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond);

  /// Build an empty switch statement.
  explicit SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar);

public:
  /// Create a switch statement.
  static SwitchStmt *Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var,
                            Expr *Cond);

  /// Create an empty switch statement optionally with storage for
  /// an init expression and a condition variable.
  static SwitchStmt *CreateEmpty(const ASTContext &Ctx, bool HasInit,
                                 bool HasVar);

  /// True if this SwitchStmt has storage for an init statement.
  bool hasInitStorage() const { return SwitchStmtBits.HasInit; }

  /// True if this SwitchStmt has storage for a condition variable.
  bool hasVarStorage() const { return SwitchStmtBits.HasVar; }

  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This switch statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  /// Retrieve the variable declared in this "switch" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// switch (int x = foo()) {
  ///   case 0: break;
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<SwitchStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable in this switch statement.
  /// The switch statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *VD);

  /// If this SwitchStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }
  SwitchCase *getSwitchCaseList() { return FirstCase; }
  const SwitchCase *getSwitchCaseList() const { return FirstCase; }
  void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; }

  SourceLocation getSwitchLoc() const { return SwitchStmtBits.SwitchLoc; }
  void setSwitchLoc(SourceLocation L) { SwitchStmtBits.SwitchLoc = L; }

  void setBody(Stmt *S, SourceLocation SL) {
    setBody(S);
    setSwitchLoc(SL);
  }

  void addSwitchCase(SwitchCase *SC) {
    assert(!SC->getNextSwitchCase() &&
           "case/default already added to a switch");
    SC->setNextSwitchCase(FirstCase);
    FirstCase = SC;
  }

  /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
  /// switch over an enum value then all cases have been explicitly covered.
  void setAllEnumCasesCovered() { SwitchStmtBits.AllEnumCasesCovered = true; }

  /// Returns true if the SwitchStmt is a switch of an enum value and all cases
  /// have been explicitly covered.
  bool isAllEnumCasesCovered() const {
    return SwitchStmtBits.AllEnumCasesCovered;
  }

  SourceLocation getBeginLoc() const { return getSwitchLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody() ? getBody()->getEndLoc()
                     : reinterpret_cast<const Stmt *>(getCond())->getEndLoc();
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SwitchStmtClass;
  }
};

/// WhileStmt - This represents a 'while' stmt.
class WhileStmt final : public Stmt,
                        private llvm::TrailingObjects<WhileStmt, Stmt *> {
  friend TrailingObjects;

  // WhileStmt is followed by several trailing objects, some of which are
  // optional. Note that it would be more convenient to put the optional
  // trailing object at the end but this would affect children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
  //
  enum { VarOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  unsigned varOffset() const { return VarOffset; }
  unsigned condOffset() const { return VarOffset + hasVarStorage(); }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasVarStorage();
  }

  /// Build a while statement.
  WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body,
            SourceLocation WL);

  /// Build an empty while statement.
  explicit WhileStmt(EmptyShell Empty, bool HasVar);

public:
  /// Create a while statement.
  static WhileStmt *Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
                           Stmt *Body, SourceLocation WL);

  /// Create an empty while statement optionally with storage for
  /// a condition variable.
  static WhileStmt *CreateEmpty(const ASTContext &Ctx, bool HasVar);

  /// True if this WhileStmt has storage for a condition variable.
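  /// When this is false, getConditionVariable() and
  /// getConditionVariableDeclStmt() return null.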
  bool hasVarStorage() const { return WhileStmtBits.HasVar; }

  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  /// Retrieve the variable declared in this "while" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// while (int x = random()) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<WhileStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable of this while statement.
  /// The while statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this WhileStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; }

  SourceLocation getBeginLoc() const { return getWhileLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == WhileStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
};

/// DoStmt - This represents a 'do/while' stmt.
class DoStmt : public Stmt {
  enum { BODY, COND, END_EXPR };
  Stmt *SubExprs[END_EXPR];
  SourceLocation WhileLoc;
  SourceLocation RParenLoc; // Location of final ')' in do stmt condition.

public:
  DoStmt(Stmt *Body, Expr *Cond, SourceLocation DL, SourceLocation WL,
         SourceLocation RP)
      : Stmt(DoStmtClass), WhileLoc(WL), RParenLoc(RP) {
    setCond(Cond);
    setBody(Body);
    setDoLoc(DL);
  }

  /// Build an empty do-while statement.
  explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {}

  Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); }
  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(SubExprs[COND]);
  }

  void setCond(Expr *Cond) { SubExprs[COND] = reinterpret_cast<Stmt *>(Cond); }

  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *Body) { SubExprs[BODY] = Body; }

  SourceLocation getDoLoc() const { return DoStmtBits.DoLoc; }
  void setDoLoc(SourceLocation L) { DoStmtBits.DoLoc = L; }
  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getDoLoc(); }
  SourceLocation getEndLoc() const { return getRParenLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DoStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }

  const_child_range children() const {
    return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
};

/// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
class ForStmt : public Stmt {
  enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
  Stmt *SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
  SourceLocation LParenLoc, RParenLoc;

public:
  ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
          Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
          SourceLocation RP);

  /// Build an empty for statement.
  explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {}

  Stmt *getInit() { return SubExprs[INIT]; }

  /// Retrieve the variable declared in this "for" statement, if any.
  ///
  /// In the following example, "y" is the condition variable.
  /// \code
  /// for (int x = random(); int y = mangle(x); ++x) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this ForStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
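  /// Returns null if this for statement has no condition variable.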
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt *>(SubExprs[CONDVAR]);
  }

  Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); }
  Expr *getInc() { return reinterpret_cast<Expr *>(SubExprs[INC]); }
  Stmt *getBody() { return SubExprs[BODY]; }

  const Stmt *getInit() const { return SubExprs[INIT]; }
  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(SubExprs[COND]);
  }
  const Expr *getInc() const { return reinterpret_cast<Expr *>(SubExprs[INC]); }
  const Stmt *getBody() const { return SubExprs[BODY]; }

  void setInit(Stmt *S) { SubExprs[INIT] = S; }
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }
  void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt *>(E); }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  SourceLocation getForLoc() const { return ForStmtBits.ForLoc; }
  void setForLoc(SourceLocation L) { ForStmtBits.ForLoc = L; }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation L) { LParenLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getForLoc(); }
  SourceLocation getEndLoc() const { return getBody()->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ForStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }

  const_child_range children() const {
    return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
};

/// GotoStmt - This represents a direct goto.
class GotoStmt : public Stmt {
  LabelDecl *Label;
  SourceLocation LabelLoc;

public:
  GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
      : Stmt(GotoStmtClass), Label(label), LabelLoc(LL) {
    setGotoLoc(GL);
  }

  /// Build an empty goto statement.
  explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {}

  LabelDecl *getLabel() const { return Label; }
  void setLabel(LabelDecl *D) { Label = D; }

  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getLabelLoc() const { return LabelLoc; }
  void setLabelLoc(SourceLocation L) { LabelLoc = L; }

  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const { return getLabelLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GotoStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// IndirectGotoStmt - This represents an indirect goto.
class IndirectGotoStmt : public Stmt {
  SourceLocation StarLoc;
  Stmt *Target;

public:
  IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc,
                   Expr *target)
      : Stmt(IndirectGotoStmtClass), StarLoc(starLoc) {
    setTarget(target);
    setGotoLoc(gotoLoc);
  }

  /// Build an empty indirect goto statement.
  explicit IndirectGotoStmt(EmptyShell Empty)
      : Stmt(IndirectGotoStmtClass, Empty) {}

  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setStarLoc(SourceLocation L) { StarLoc = L; }
  SourceLocation getStarLoc() const { return StarLoc; }

  Expr *getTarget() { return reinterpret_cast<Expr *>(Target); }
  const Expr *getTarget() const {
    return reinterpret_cast<const Expr *>(Target);
  }
  void setTarget(Expr *E) { Target = reinterpret_cast<Stmt *>(E); }

  /// getConstantTarget - Returns the fixed target of this indirect
  /// goto, if one exists.
  LabelDecl *getConstantTarget();
  const LabelDecl *getConstantTarget() const {
    return const_cast<IndirectGotoStmt *>(this)->getConstantTarget();
  }

  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return Target->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IndirectGotoStmtClass;
  }

  // Iterators
  child_range children() { return child_range(&Target, &Target + 1); }

  const_child_range children() const {
    return const_child_range(&Target, &Target + 1);
  }
};

/// ContinueStmt - This represents a continue.
class ContinueStmt : public Stmt {
public:
  ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass) {
    setContinueLoc(CL);
  }

  /// Build an empty continue statement.
  explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {}

  SourceLocation getContinueLoc() const {
    return ContinueStmtBits.ContinueLoc;
  }
  void setContinueLoc(SourceLocation L) { ContinueStmtBits.ContinueLoc = L; }

  SourceLocation getBeginLoc() const { return getContinueLoc(); }
  SourceLocation getEndLoc() const { return getContinueLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ContinueStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// BreakStmt - This represents a break.
class BreakStmt : public Stmt {
public:
  BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass) { setBreakLoc(BL); }

  /// Build an empty break statement.
  explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {}

  SourceLocation getBreakLoc() const { return BreakStmtBits.BreakLoc; }
  void setBreakLoc(SourceLocation L) { BreakStmtBits.BreakLoc = L; }

  SourceLocation getBeginLoc() const { return getBreakLoc(); }
  SourceLocation getEndLoc() const { return getBreakLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == BreakStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// ReturnStmt - This represents a return, optionally of an expression:
///   return;
///   return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void. We explicitly model this in the AST, which means you can't
/// depend on the return type of the function to tell whether an argument is
/// present.
class ReturnStmt final
    : public Stmt,
      private llvm::TrailingObjects<ReturnStmt, const VarDecl *> {
  friend TrailingObjects;

  /// The return expression.
  Stmt *RetExpr;

  // ReturnStmt is followed optionally by a trailing "const VarDecl *"
  // for the NRVO candidate.
  // Present if and only if hasNRVOCandidate().

  /// True if this ReturnStmt has storage for an NRVO candidate.
  bool hasNRVOCandidate() const { return ReturnStmtBits.HasNRVOCandidate; }

  unsigned numTrailingObjects(OverloadToken<const VarDecl *>) const {
    return hasNRVOCandidate();
  }

  /// Build a return statement.
  ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate);

  /// Build an empty return statement.
  explicit ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate);

public:
  /// Create a return statement.
  static ReturnStmt *Create(const ASTContext &Ctx, SourceLocation RL, Expr *E,
                            const VarDecl *NRVOCandidate);

  /// Create an empty return statement, optionally with
  /// storage for an NRVO candidate.
  static ReturnStmt *CreateEmpty(const ASTContext &Ctx, bool HasNRVOCandidate);

  Expr *getRetValue() { return reinterpret_cast<Expr *>(RetExpr); }
  const Expr *getRetValue() const {
    return reinterpret_cast<Expr *>(RetExpr);
  }
  void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt *>(E); }

  /// Retrieve the variable that might be used for the named return
  /// value optimization.
  ///
  /// The optimization itself can only be performed if the variable is
  /// also marked as an NRVO object.
  const VarDecl *getNRVOCandidate() const {
    return hasNRVOCandidate() ? *getTrailingObjects<const VarDecl *>()
                              : nullptr;
  }

  /// Set the variable that might be used for the named return value
  /// optimization. The return statement must have storage for it,
  /// which is the case if and only if hasNRVOCandidate() is true.
  void setNRVOCandidate(const VarDecl *Var) {
    assert(hasNRVOCandidate() &&
           "This return statement has no storage for an NRVO candidate!");
    *getTrailingObjects<const VarDecl *>() = Var;
  }

  SourceLocation getReturnLoc() const { return ReturnStmtBits.RetLoc; }
  void setReturnLoc(SourceLocation L) { ReturnStmtBits.RetLoc = L; }

  SourceLocation getBeginLoc() const { return getReturnLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return RetExpr ? RetExpr->getEndLoc() : getReturnLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ReturnStmtClass;
  }

  // Iterators
  child_range children() {
    if (RetExpr)
      return child_range(&RetExpr, &RetExpr + 1);
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    if (RetExpr)
      return const_child_range(&RetExpr, &RetExpr + 1);
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
class AsmStmt : public Stmt {
protected:
  friend class ASTStmtReader;

  SourceLocation AsmLoc;

  /// True if the assembly statement does not have any input or output
  /// operands.
  bool IsSimple;

  /// If true, treat this inline assembly as having side effects.
  /// This assembly statement should not be optimized, deleted or moved.
  bool IsVolatile;

  unsigned NumOutputs;
  unsigned NumInputs;
  unsigned NumClobbers;

  Stmt **Exprs = nullptr;

  AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
          unsigned numoutputs, unsigned numinputs, unsigned numclobbers)
      : Stmt(SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
        NumOutputs(numoutputs), NumInputs(numinputs),
        NumClobbers(numclobbers) {}

public:
  /// Build an empty inline-assembly statement.
  explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {}

  SourceLocation getAsmLoc() const { return AsmLoc; }
  void setAsmLoc(SourceLocation L) { AsmLoc = L; }

  bool isSimple() const { return IsSimple; }
  void setSimple(bool V) { IsSimple = V; }

  bool isVolatile() const { return IsVolatile; }
  void setVolatile(bool V) { IsVolatile = V; }

  SourceLocation getBeginLoc() const LLVM_READONLY { return {}; }
  SourceLocation getEndLoc() const LLVM_READONLY { return {}; }

  //===--- Asm String Analysis ---===//

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  unsigned getNumOutputs() const { return NumOutputs; }

  /// getOutputConstraint - Return the constraint string for the specified
  /// output operand. All output constraints are known to be non-empty (either
  /// '=' or '+').
  StringRef getOutputConstraint(unsigned i) const;

  /// isOutputPlusConstraint - Return true if the specified output constraint
  /// is a "+" constraint (which is both an input and an output) or false if it
  /// is an "=" constraint (just an output).
  bool isOutputPlusConstraint(unsigned i) const {
    return getOutputConstraint(i)[0] == '+';
  }

  const Expr *getOutputExpr(unsigned i) const;

  /// getNumPlusOperands - Return the number of output operands that have a "+"
  /// constraint.
  unsigned getNumPlusOperands() const;

  //===--- Input operands ---===//

  unsigned getNumInputs() const { return NumInputs; }

  /// getInputConstraint - Return the specified input constraint. Unlike output
  /// constraints, these can be empty.
  StringRef getInputConstraint(unsigned i) const;

  const Expr *getInputExpr(unsigned i) const;

  //===--- Other ---===//

  unsigned getNumClobbers() const { return NumClobbers; }
  StringRef getClobber(unsigned i) const;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass ||
           T->getStmtClass() == MSAsmStmtClass;
  }

  // Input expr iterators.

  using inputs_iterator = ExprIterator;
  using const_inputs_iterator = ConstExprIterator;
  using inputs_range = llvm::iterator_range<inputs_iterator>;
  using inputs_const_range = llvm::iterator_range<const_inputs_iterator>;

  inputs_iterator begin_inputs() { return &Exprs[0] + NumOutputs; }

  inputs_iterator end_inputs() { return &Exprs[0] + NumOutputs + NumInputs; }

  inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); }

  const_inputs_iterator begin_inputs() const {
    return &Exprs[0] + NumOutputs;
  }

  const_inputs_iterator end_inputs() const {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  inputs_const_range inputs() const {
    return inputs_const_range(begin_inputs(), end_inputs());
  }

  // Output expr iterators.
  using outputs_iterator = ExprIterator;
  using const_outputs_iterator = ConstExprIterator;
  using outputs_range = llvm::iterator_range<outputs_iterator>;
  using outputs_const_range = llvm::iterator_range<const_outputs_iterator>;

  outputs_iterator begin_outputs() { return &Exprs[0]; }
  outputs_iterator end_outputs() { return &Exprs[0] + NumOutputs; }
  outputs_range outputs() {
    return outputs_range(begin_outputs(), end_outputs());
  }

  const_outputs_iterator begin_outputs() const { return &Exprs[0]; }
  const_outputs_iterator end_outputs() const {
    return &Exprs[0] + NumOutputs;
  }

  outputs_const_range outputs() const {
    return outputs_const_range(begin_outputs(), end_outputs());
  }

  child_range children() {
    return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
  }

  const_child_range children() const {
    return const_child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
  }
};

/// This represents a GCC inline-assembly statement extension.
class GCCAsmStmt : public AsmStmt {
  friend class ASTStmtReader;

  SourceLocation RParenLoc;
  StringLiteral *AsmStr;

  // FIXME: If we wanted to, we could allocate all of these in one big array.
  StringLiteral **Constraints = nullptr;
  StringLiteral **Clobbers = nullptr;
  IdentifierInfo **Names = nullptr;

public:
  GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple,
             bool isvolatile, unsigned numoutputs, unsigned numinputs,
             IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
             StringLiteral *asmstr, unsigned numclobbers,
             StringLiteral **clobbers, SourceLocation rparenloc);

  /// Build an empty inline-assembly statement.
  explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {}

  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  //===--- Asm String Analysis ---===//

  const StringLiteral *getAsmString() const { return AsmStr; }
  StringLiteral *getAsmString() { return AsmStr; }
  void setAsmString(StringLiteral *E) { AsmStr = E; }

  /// AsmStringPiece - this is part of a decomposed asm string specification
  /// (for use with the AnalyzeAsmString function below). An asm string is
  /// considered to be a concatenation of these parts.
  class AsmStringPiece {
  public:
    enum Kind {
      String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
      Operand // Operand reference, with optional modifier %c4.
    };

  private:
    Kind MyKind;
    std::string Str;
    unsigned OperandNo;

    // Source range for operand references.
    CharSourceRange Range;

  public:
    AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
    AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
                   SourceLocation End)
        : MyKind(Operand), Str(S), OperandNo(OpNo),
          Range(CharSourceRange::getCharRange(Begin, End)) {}

    bool isString() const { return MyKind == String; }
    bool isOperand() const { return MyKind == Operand; }

    const std::string &getString() const { return Str; }

    unsigned getOperandNo() const {
      assert(isOperand());
      return OperandNo;
    }

    CharSourceRange getRange() const {
      assert(isOperand() && "Range is currently used only for Operands.");
      return Range;
    }

    /// getModifier - Get the modifier for this operand, if present. This
    /// returns '\0' if there was no modifier.
    char getModifier() const;
  };
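  // For example (an illustrative sketch, not normative documentation), an asm
  // string such as "mov %1, %0" decomposes into alternating pieces: a String
  // piece for "mov ", an Operand piece referencing operand 1, a String piece
  // for ", ", and an Operand piece referencing operand 0.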
  /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
  /// it into pieces. If the asm string is erroneous, set \p DiagOffs to the
  /// offset of the problem and return a non-zero diagnostic ID; otherwise
  /// return 0. This handles canonicalization and translation of strings from
  /// GCC syntax to LLVM IR syntax, and handles flattening of named references
  /// like %[foo] to Operand AsmStringPiece's.
  unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
                            const ASTContext &C, unsigned &DiagOffs) const;

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; }

  StringRef getOutputName(unsigned i) const {
    if (IdentifierInfo *II = getOutputIdentifier(i))
      return II->getName();

    return {};
  }

  StringRef getOutputConstraint(unsigned i) const;

  const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
    return Constraints[i];
  }
  StringLiteral *getOutputConstraintLiteral(unsigned i) {
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt *>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//

  IdentifierInfo *getInputIdentifier(unsigned i) const {
    return Names[i + NumOutputs];
  }

  StringRef getInputName(unsigned i) const {
    if (IdentifierInfo *II = getInputIdentifier(i))
      return II->getName();

    return {};
  }

  StringRef getInputConstraint(unsigned i) const;

  const StringLiteral *getInputConstraintLiteral(unsigned i) const {
    return Constraints[i + NumOutputs];
  }
  StringLiteral *getInputConstraintLiteral(unsigned i) {
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt *>(this)->getInputExpr(i);
  }

private:
  void setOutputsAndInputsAndClobbers(const ASTContext &C,
                                      IdentifierInfo **Names,
                                      StringLiteral **Constraints,
                                      Stmt **Exprs, unsigned NumOutputs,
                                      unsigned NumInputs,
                                      StringLiteral **Clobbers,
                                      unsigned NumClobbers);

public:
  //===--- Other ---===//

  /// getNamedOperand - Given a symbolic operand reference like %[foo],
  /// translate this into a numeric value needed to reference the same operand.
  /// This returns -1 if the operand name is invalid.
  int getNamedOperand(StringRef SymbolicName) const;

  StringRef getClobber(unsigned i) const;

  StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
  const StringLiteral *getClobberStringLiteral(unsigned i) const {
    return Clobbers[i];
  }

  SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
  SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass;
  }
};

/// This represents a Microsoft inline-assembly statement extension.
class MSAsmStmt : public AsmStmt {
  friend class ASTStmtReader;

  SourceLocation LBraceLoc, EndLoc;
  StringRef AsmStr;

  unsigned NumAsmToks = 0;

  Token *AsmToks = nullptr;
  StringRef *Constraints = nullptr;
  StringRef *Clobbers = nullptr;

public:
  MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
            SourceLocation lbraceloc, bool issimple, bool isvolatile,
            ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs,
            ArrayRef<StringRef> constraints, ArrayRef<Expr *> exprs,
            StringRef asmstr, ArrayRef<StringRef> clobbers,
            SourceLocation endloc);

  /// Build an empty MS-style inline-assembly statement.
  explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {}

  SourceLocation getLBraceLoc() const { return LBraceLoc; }
  void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  bool hasBraces() const { return LBraceLoc.isValid(); }

  unsigned getNumAsmToks() { return NumAsmToks; }
  Token *getAsmToks() { return AsmToks; }

  //===--- Asm String Analysis ---===//

  StringRef getAsmString() const { return AsmStr; }

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  StringRef getOutputConstraint(unsigned i) const {
    assert(i < NumOutputs);
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<MSAsmStmt *>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//

  StringRef getInputConstraint(unsigned i) const {
    assert(i < NumInputs);
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<MSAsmStmt *>(this)->getInputExpr(i);
  }

  //===--- Other ---===//

  ArrayRef<StringRef> getAllConstraints() const {
    return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
  }

  ArrayRef<StringRef> getClobbers() const {
    return llvm::makeArrayRef(Clobbers, NumClobbers);
  }

  ArrayRef<Expr *> getAllExprs() const {
    return llvm::makeArrayRef(reinterpret_cast<Expr **>(Exprs),
                              NumInputs + NumOutputs);
  }

  StringRef getClobber(unsigned i) const { return getClobbers()[i]; }

private:
  void initialize(const ASTContext &C, StringRef AsmString,
                  ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints,
                  ArrayRef<Expr *> Exprs, ArrayRef<StringRef> Clobbers);

public:
  SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == MSAsmStmtClass;
  }

  child_range children() {
    return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
  }

  const_child_range children() const {
    return const_child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
  }
};

class SEHExceptStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  SourceLocation Loc;
  Stmt *Children[2];

  enum { FILTER_EXPR, BLOCK };

  SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block);
  explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {}

public:
  static SEHExceptStmt *Create(const ASTContext &C, SourceLocation ExceptLoc,
                               Expr *FilterExpr, Stmt *Block);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getExceptLoc(); }

  SourceLocation getExceptLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return getBlock()->getEndLoc(); }

  Expr *getFilterExpr() const {
    return reinterpret_cast<Expr *>(Children[FILTER_EXPR]);
  }

  CompoundStmt *getBlock() const {
    return cast<CompoundStmt>(Children[BLOCK]);
  }

  child_range children() { return child_range(Children, Children + 2); }

  const_child_range children() const {
    return const_child_range(Children, Children + 2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHExceptStmtClass;
  }
};

class SEHFinallyStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  SourceLocation Loc;
  Stmt *Block;

  SEHFinallyStmt(SourceLocation Loc, Stmt *Block);
  explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {}

public:
  static SEHFinallyStmt *Create(const ASTContext &C, SourceLocation FinallyLoc,
                                Stmt *Block);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getFinallyLoc(); }
LLVM_READONLY { return getFinallyLoc(); } SourceLocation getFinallyLoc() const { return Loc; } SourceLocation getEndLoc() const { return Block->getEndLoc(); } CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); } child_range children() { return child_range(&Block,&Block+1); } const_child_range children() const { return const_child_range(&Block, &Block + 1); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHFinallyStmtClass; } }; class SEHTryStmt : public Stmt { friend class ASTReader; friend class ASTStmtReader; bool IsCXXTry; SourceLocation TryLoc; Stmt *Children[2]; enum { TRY = 0, HANDLER = 1 }; SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try' SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {} public: static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry, SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); SourceLocation getBeginLoc() const LLVM_READONLY { return getTryLoc(); } SourceLocation getTryLoc() const { return TryLoc; } SourceLocation getEndLoc() const { return Children[HANDLER]->getEndLoc(); } bool getIsCXXTry() const { return IsCXXTry; } CompoundStmt* getTryBlock() const { return cast<CompoundStmt>(Children[TRY]); } Stmt *getHandler() const { return Children[HANDLER]; } /// Returns 0 if not defined SEHExceptStmt *getExceptHandler() const; SEHFinallyStmt *getFinallyHandler() const; child_range children() { return child_range(Children, Children+2); } const_child_range children() const { return const_child_range(Children, Children + 2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHTryStmtClass; } }; /// Represents a __leave statement. class SEHLeaveStmt : public Stmt { SourceLocation LeaveLoc; public: explicit SEHLeaveStmt(SourceLocation LL) : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {} /// Build an empty __leave statement. explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {} SourceLocation getLeaveLoc() const { return LeaveLoc; } void setLeaveLoc(SourceLocation L) { LeaveLoc = L; } SourceLocation getBeginLoc() const LLVM_READONLY { return LeaveLoc; } SourceLocation getEndLoc() const LLVM_READONLY { return LeaveLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHLeaveStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } }; /// This captures a statement into a function. For example, the following /// pragma annotated compound statement can be represented as a CapturedStmt, /// and this compound statement is the body of an anonymous outlined function. /// @code /// #pragma omp parallel /// { /// compute(); /// } /// @endcode class CapturedStmt : public Stmt { public: /// The different capture forms: by 'this', by reference, capture for /// variable-length array type etc. enum VariableCaptureKind { VCK_This, VCK_ByRef, VCK_ByCopy, VCK_VLAType, }; /// Describes the capture of either a variable, or 'this', or /// variable-length array type. class Capture { llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind; SourceLocation Loc; public: friend class ASTStmtReader; /// Create a new capture. /// /// \param Loc The source location associated with this capture. /// /// \param Kind The kind of capture (this, ByRef, ...). /// /// \param Var The variable being captured, or null if capturing this. 
    Capture(SourceLocation Loc, VariableCaptureKind Kind,
            VarDecl *Var = nullptr);

    /// Determine the kind of capture.
    VariableCaptureKind getCaptureKind() const;

    /// Retrieve the source location at which the variable or 'this' was
    /// first used.
    SourceLocation getLocation() const { return Loc; }

    /// Determine whether this capture handles the C++ 'this' pointer.
    bool capturesThis() const { return getCaptureKind() == VCK_This; }

    /// Determine whether this capture handles a variable (by reference).
    bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; }

    /// Determine whether this capture handles a variable by copy.
    bool capturesVariableByCopy() const {
      return getCaptureKind() == VCK_ByCopy;
    }

    /// Determine whether this capture handles a variable-length array
    /// type.
    bool capturesVariableArrayType() const {
      return getCaptureKind() == VCK_VLAType;
    }

    /// Retrieve the declaration of the variable being captured.
    ///
    /// This operation is only valid if this capture captures a variable.
    VarDecl *getCapturedVar() const;
  };

private:
  /// The number of variables captured, including 'this'.
  unsigned NumCaptures;

  /// The pointer part is the implicitly-generated outlined function and the
  /// int part is the captured region kind, 'CR_Default' etc.
  llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind;

  /// The record for captured variables, a RecordDecl or CXXRecordDecl.
  RecordDecl *TheRecordDecl = nullptr;

  /// Construct a captured statement.
  CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
               ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD);

  /// Construct an empty captured statement.
  CapturedStmt(EmptyShell Empty, unsigned NumCaptures);

  Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); }

  Stmt *const *getStoredStmts() const {
    return reinterpret_cast<Stmt *const *>(this + 1);
  }

  Capture *getStoredCaptures() const;

  void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; }

public:
  friend class ASTStmtReader;

  static CapturedStmt *Create(const ASTContext &Context, Stmt *S,
                              CapturedRegionKind Kind,
                              ArrayRef<Capture> Captures,
                              ArrayRef<Expr *> CaptureInits,
                              CapturedDecl *CD, RecordDecl *RD);

  static CapturedStmt *CreateDeserialized(const ASTContext &Context,
                                          unsigned NumCaptures);

  /// Retrieve the statement being captured.
  Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; }
  const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; }

  /// Retrieve the outlined function declaration.
  CapturedDecl *getCapturedDecl();
  const CapturedDecl *getCapturedDecl() const;

  /// Set the outlined function declaration.
  void setCapturedDecl(CapturedDecl *D);

  /// Retrieve the captured region kind.
  CapturedRegionKind getCapturedRegionKind() const;

  /// Set the captured region kind.
  void setCapturedRegionKind(CapturedRegionKind Kind);

  /// Retrieve the record declaration for captured variables.
  const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; }

  /// Set the record declaration for captured variables.
  void setCapturedRecordDecl(RecordDecl *D) {
    assert(D && "null RecordDecl");
    TheRecordDecl = D;
  }

  /// True if this variable has been captured.
  bool capturesVariable(const VarDecl *Var) const;

  /// An iterator that walks over the captures.
using capture_iterator = Capture *; using const_capture_iterator = const Capture *; using capture_range = llvm::iterator_range<capture_iterator>; using capture_const_range = llvm::iterator_range<const_capture_iterator>; capture_range captures() { return capture_range(capture_begin(), capture_end()); } capture_const_range captures() const { return capture_const_range(capture_begin(), capture_end()); } /// Retrieve an iterator pointing to the first capture. capture_iterator capture_begin() { return getStoredCaptures(); } const_capture_iterator capture_begin() const { return getStoredCaptures(); } /// Retrieve an iterator pointing past the end of the sequence of /// captures. capture_iterator capture_end() const { return getStoredCaptures() + NumCaptures; } /// Retrieve the number of captures, including 'this'. unsigned capture_size() const { return NumCaptures; } /// Iterator that walks over the capture initialization arguments. using capture_init_iterator = Expr **; using capture_init_range = llvm::iterator_range<capture_init_iterator>; /// Const iterator that walks over the capture initialization /// arguments. using const_capture_init_iterator = Expr *const *; using const_capture_init_range = llvm::iterator_range<const_capture_init_iterator>; capture_init_range capture_inits() { return capture_init_range(capture_init_begin(), capture_init_end()); } const_capture_init_range capture_inits() const { return const_capture_init_range(capture_init_begin(), capture_init_end()); } /// Retrieve the first initialization argument. capture_init_iterator capture_init_begin() { return reinterpret_cast<Expr **>(getStoredStmts()); } const_capture_init_iterator capture_init_begin() const { return reinterpret_cast<Expr *const *>(getStoredStmts()); } /// Retrieve the iterator pointing one past the last initialization /// argument. capture_init_iterator capture_init_end() { return capture_init_begin() + NumCaptures; } const_capture_init_iterator capture_init_end() const { return capture_init_begin() + NumCaptures; } SourceLocation getBeginLoc() const LLVM_READONLY { return getCapturedStmt()->getBeginLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return getCapturedStmt()->getEndLoc(); } SourceRange getSourceRange() const LLVM_READONLY { return getCapturedStmt()->getSourceRange(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CapturedStmtClass; } child_range children(); const_child_range children() const; }; } // namespace clang #endif // LLVM_CLANG_AST_STMT_H
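// --- Editor's note: a minimal usage sketch (not part of the original header),
// assuming the clang AST libraries. The CapturedStmt interface above is what
// OpenMP support code walks to discover what an outlined region captures; the
// helper below only uses members declared in this file:
//
//   void listCaptures(const clang::CapturedStmt *CS) {
//     for (const clang::CapturedStmt::Capture &C : CS->captures()) {
//       if (C.capturesThis())
//         llvm::errs() << "captures 'this'\n";
//       else if (C.capturesVariable() || C.capturesVariableByCopy())
//         llvm::errs() << "captures " << C.getCapturedVar()->getNameAsString()
//                      << "\n";
//       else if (C.capturesVariableArrayType())
//         llvm::errs() << "captures a VLA type\n";
//     }
//   }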
ps_local-inl.h
/*!
 * Copyright (c) 2014 by Contributors
 * \file ps_local-inl.h
 * \brief local multi-threading implementation of PS abstraction
 *
 * \author Tianqi Chen, Mu Li
 */
#ifndef MSHADOW_PS_LOCAL_INL_H_ // NOLINT(*)
#define MSHADOW_PS_LOCAL_INL_H_ // NOLINT(*)
#include <map>
#include <utility>
#include <string>
#include <vector>
#if defined(_OPENMP)
#include <omp.h>
#ifdef _MSC_VER
typedef int ms_omp_uint;
#else
typedef unsigned ms_omp_uint;
#endif
#endif

#include "./thread.h"
#include "./thread_util.h"

namespace mshadow {
namespace ps {
// multi-threaded local implementation of the PS abstraction
template<typename xpu, typename DType>
class LocalModel : public ISharedModel<xpu, DType> {
 public:
  // redefine callback function
  typedef typename ISharedModel<xpu, DType>::CallbackFunction CallbackFunction;
  // constructor
  LocalModel(void) {
    init_end = 0;
    perdev_pull_thread = 1;
    perdev_push_thread = 1;
    use_fifo_push_queue = 0;
    bigarray_bound = 1000 * 1000;
    nthread_reduction = 8;
    use_pin_memory = 1;
    test_on_server = 0;
    update_on_server = 0;
    destroy_signal = false;
    custom_server = NULL;
  }
  // destructor
  virtual ~LocalModel(void) {
    this->Destroy();
  }
  inline void Destroy(void) {
    if (init_end != 0) {
      destroy_signal = true;
      for (size_t i = 0; i < push_queues.size(); ++i) {
        push_queues[i].Abort(1);
      }
      for (size_t i = 0; i < pull_queues.size(); ++i) {
        pull_queues[i].Abort(1);
      }
      for (size_t i = 0; i < thread_push_handler.size(); ++i) {
        thread_push_handler[i].Join();
      }
      for (size_t i = 0; i < thread_pull_handler.size(); ++i) {
        thread_pull_handler[i].Join();
      }
      for (size_t i = 0; i < push_queues.size(); ++i) {
        push_queues[i].Destroy();
      }
      push_map.Destroy();
      push_lock.Destroy();
      for (size_t i = 0; i < pull_queues.size(); ++i) {
        pull_queues[i].Destroy();
      }
      pull_map.Destroy();
      request_lock.Destroy();
      wait_lock.Destroy();
      wait_cond.Destroy();
      init_end = 0;
    }
    if (custom_server != NULL) {
      delete custom_server;
      custom_server = NULL;
    }
  }
  virtual void SetParam(const char *name, const char *val) {
    int key;
    if (sscanf(name, "push_op[%d]", &key) == 1) {
      if (!strcmp(val, "gather")) {
        request_lock.Lock();
        push_operation[key] = kGather;
        request_lock.Unlock();
        return;
      }
      if (!strcmp(val, "sum")) {
        push_operation[key] = kSum;
        return;
      }
      LOG(FATAL) << "unknown push operation " << val;
    }
    if (!strcmp(name, "reduce_thread")) {
      nthread_reduction = atoi(val);
    }
    if (!strcmp(name, "use_pin_memory")) {
      use_pin_memory = atoi(val);
    }
    if (!strcmp(name, "bigarray_bound")) {
      bigarray_bound = static_cast<size_t>(atol(val));
    }
    if (!strcmp(name, "pull_thread")) {
      if (!strcmp(val, "ndev")) {
        perdev_pull_thread = 1;
      } else if (!strcmp(val, "one")) {
        perdev_pull_thread = 0;
      } else {
        LOG(FATAL) << "invalid value for parameter pull_thread,"
                   << " can only be ndev or one";
      }
    }
    if (!strcmp(name, "push_thread")) {
      if (!strcmp(val, "ndev")) {
        perdev_push_thread = 1;
      } else if (!strcmp(val, "one")) {
        perdev_push_thread = 0;
      } else {
        LOG(FATAL) << "invalid value for parameter push_thread,"
                   << " can only be ndev or one";
      }
    }
    if (!strcmp(name, "update_on_server")) {
      update_on_server = atoi(val);
    }
    if (!strcmp(name, "test_on_server")) {
      test_on_server = atoi(val);
    }
    // ignore message parameter
    if (!strncmp(name, "msg:", 4)) return;
    cfgvec.push_back(std::make_pair(std::string(name),
                                    std::string(val)));
  }
  virtual void PullWait(int key, int devid) {
    const int wid = GetWorkIndex(devid);
    PullEntry *p = pull_map.Get(key);
    if (p == NULL || p->wait.size() == 0) return;
    PullEntry &e = *p;
    // wait until the pull request of this device finishes
    CHECK_EQ(e.wait.size(), devices.size())
        << "PullWait: must initialize the wait";
    PullWaitRecord &w = e.wait[wid];
    if (!w.finished) {
      wait_lock.Lock();
      w.nwait += 1;
      while (!w.finished) {
        wait_cond.Wait(&wait_lock);
      }
      w.nwait -= 1;
      CHECK_GE(w.nwait, 0) << "boundary check";
      wait_lock.Unlock();
    }
  }
  virtual void Init(const std::vector<int> &devices) {
    CHECK_EQ(init_end, 0)
        << "LocalServer.Init can only be called once";
    CHECK_NE(devices.size(), 0)
        << "LocalServer.Init: must contain at least one device";
    this->devices = devices;
    destroy_signal = false;
    // initialize device id to local index
    dev2index.clear();
    for (size_t i = 0; i < devices.size(); ++i) {
      int devid = devices[i];
      CHECK_GE(devid, 0) << "device id must be non-negative";
      if (devid >= static_cast<int>(dev2index.size())) {
        dev2index.resize(devid + 1, -1);
      }
      dev2index[devid] = static_cast<int>(i);
    }
    // allocate space
    pull_stream.resize(devices.size());
    push_stream.resize(devices.size());
    // initialize all the thread related things
    if (perdev_push_thread != 0) {
      push_queues.resize(devices.size());
    } else {
      push_queues.resize(1);
    }
    for (size_t i = 0; i < push_queues.size(); ++i) {
      push_queues[i].Init(use_fifo_push_queue != 0);
    }
    push_map.Init();
    push_lock.Init();
    pull_map.Init();
    request_lock.Init();
    wait_lock.Init();
    wait_cond.Init();
    if (perdev_pull_thread != 0) {
      pull_queues.resize(devices.size());
    } else {
      pull_queues.resize(1);
    }
    for (size_t i = 0; i < pull_queues.size(); ++i) {
      pull_queues[i].Init();
    }
    // initialize the thread
    if (perdev_push_thread != 0) {
      thread_push_handler.resize(devices.size());
      for (size_t i = 0; i < devices.size(); ++i) {
        std::pair<LocalModel*, size_t> *p = new std::pair<LocalModel*, size_t>();
        *p = std::make_pair(this, i);
        thread_push_handler[i].Start(PushLocalThread, p);
      }
    } else {
      thread_push_handler.resize(1);
      thread_push_handler[0].Start(PushGlobalThread, this);
    }
    // initialize pull handler
    if (perdev_pull_thread != 0) {
      thread_pull_handler.resize(devices.size());
      for (size_t i = 0; i < devices.size(); ++i) {
        std::pair<LocalModel*, size_t> *p = new std::pair<LocalModel*, size_t>();
        *p = std::make_pair(this, i);
        thread_pull_handler[i].Start(PullLocalThread, p);
      }
    } else {
      thread_pull_handler.resize(1);
      thread_pull_handler[0].Start(PullGlobalThread, this);
    }
    this->InitCustomerServer();
    this->init_end = 1;
  }
  // set weight
  virtual void SetWeight_(Tensor<xpu, 2, DType> data,
                          int key, int devid) {
    PushEntry &e = push_map.GetRef(key);
    Stream<xpu> s;
    push_lock.Lock();
    mshadow::Copy(e.weight, data, &s);
    push_lock.Unlock();
  }
  virtual void CheckWeight_(Tensor<xpu, 2, DType> data,
                            int key, int devid) {
    CHECK_NE(test_on_server, 0) << "must be in pair debug mode";
    PushEntry &e = push_map.GetRef(key);
    mshadow::TensorContainer<cpu, 2, DType> tmp(false);
    tmp.Resize(data.shape_);
    Stream<xpu> s;
    push_lock.Lock();
    // copy data
    mshadow::Copy(tmp, data, &s);
    index_t count = tmp.shape_.Size();
    double diff = 0.0, ssum = 0.0, maxdiff = 0.0;
    index_t mxidx = 0;
    for (index_t i = 0; i < count; ++i) {
      double d = std::abs(tmp.dptr_[i] - e.weight.dptr_[i]);
      if (d > maxdiff) {
        maxdiff = d; mxidx = i;
      }
      diff += d;
      ssum += std::abs(tmp.dptr_[i]);
    }
    push_lock.Unlock();
    // relative absolute error
    double rerr = diff / ssum;
    if (rerr > 1e-5 || diff != diff) {
      fprintf(stderr, "PSLocal:key=%d,dev=%d: err=%f, maxd[%u]=%f, diff=%f, ssum=%f\n",
              key, devid, rerr, mxidx, maxdiff, diff, ssum);
    } else {
      fprintf(stderr, "PSLocal:key=%d,dev=%d:check pass\n", key, devid);
    }
  }

 protected:
  /*! \brief operation performed locally in PS */
  enum LocalOp {
    /*! \brief take sum of all devices over the same key */
    kSum = 0,
    /*!
     * \brief concatenate(gather),
     *  the tensors in all devices with the same key
     */
    kGather = 1
  };
  virtual void InitKey_(Shape<2> shape, int key, int devid) {
    this->InitPullMap(key);
    this->InitPushMap(key, shape);
  }
  virtual void Push_(Tensor<xpu, 2, DType> data,
                     int key, int devid, int priority) {
    PullEntry &e = pull_map.GetRef(key);
    e.req[GetWorkIndex(devid)].ready = false;
    if (perdev_push_thread != 0) {
      int wid = GetWorkIndex(devid);
      push_queues[wid].Push(PullTask(data, key, devid), priority);
    } else {
      push_queues[0].Push(PullTask(data, key, devid), priority);
    }
  }
  virtual void PullReq_(Tensor<xpu, 2, DType> data,
                        int key, int devid, int priority,
                        CallbackFunction callback,
                        void *callback_arg) {
    PullEntry &e = pull_map.GetRef(key);
    CHECK_EQ(e.req.size(), devices.size())
        << "PullReq: must initialize the key, req";
    CHECK_EQ(e.wait.size(), devices.size())
        << "PullReq: must initialize the key, wait";
    const int wid = GetWorkIndex(devid);
    PullReqRecord &r = e.req[wid];
    r.dest = data;
    r.priority = priority;
    r.callback = callback;
    r.callback_arg = callback_arg;
    // reset pull request finish mark
    wait_lock.Lock();
    e.wait[wid].finished = false;
    wait_lock.Unlock();
    // check ready event
    request_lock.Lock();
    CHECK_EQ(!r.pending, true)
        << "key = " << key
        << " cannot send duplicate pull request before it finishes";
    if (e.req[wid].ready) {
      if (perdev_pull_thread != 0) {
        pull_queues[wid].Push(std::make_pair(key, devid));
      } else {
        pull_queues[0].Push(std::make_pair(key, devid));
      }
    } else {
      r.pending = true;
    }
    request_lock.Unlock();
  }
  /*!
   * \brief called to notify that the data is ready for pull
   * \param data the data that can be pulled back
   * \param key the key of the data
   */
  virtual void PullReady(Tensor<cpu, 2> data, int key) {
    PullEntry &e = pull_map.GetRef(key);
    CHECK_EQ(e.req.size(), devices.size())
        << "PullReady: must initialize the key, req";
    request_lock.Lock();
    e.src = data;
    for (index_t i = 0; i < e.req.size(); ++i) {
      e.req[i].ready = true;
      if (e.req[i].pending) {
        if (perdev_pull_thread != 0) {
          pull_queues[i].Push(std::make_pair(key, devices[i]));
        } else {
          pull_queues[0].Push(std::make_pair(key, devices[i]));
        }
        e.req[i].pending = false;
      }
    }
    request_lock.Unlock();
  }
  virtual void ServerInitKey(Tensor<cpu, 2> weight, int key) {
    if (custom_server != NULL) {
      // initialize the server, and mark ready for pull-back
      custom_server->InitModel(key, weight.dptr_, weight.MSize());
      if (update_on_server != 0) {
        this->PullReady(weight, key);
      }
    }
  }
  /*!
   * \brief event handler for push finish
   *  called when all the data with the same key comes in
   * \param data the buffer holds the data in all devices
   * \param key the key of the data
   */
  virtual void HandlePushFinish(Tensor<cpu, 3, DType> data, int key) {
    // LOG(ERROR) << dbstr(data);
    LocalOp op = kSum;
    typename std::map<int, LocalOp>::const_iterator
        it = push_operation.find(key);
    if (it != push_operation.end() && it->first == key) {
      op = it->second;
    }
    // customized server
    if (custom_server != NULL) {
      this->ReduceSum(data);
      custom_server->Update(key, data[0].dptr_, data[0].MSize());
      if (update_on_server != 0) {
        PushEntry &e = push_map.GetRef(key);
        this->PullReady(e.weight, key);
      } else {
        CHECK_NE(test_on_server, 0) << "test mode";
        this->PullReady(data[0], key);
      }
      return;
    }
    switch (op) {
      case kSum: {
        this->ReduceSum(data);
        this->PullReady(data[0], key);
        return;
      }
      case kGather: {
        this->PullReady(data.FlatTo2D(), key);
        return;
      }
      default: LOG(FATAL) << "unknown LocalOp";
    }
  }
  /*!
* \brief event handler for reduce finish * called when all the data with same key finishes the reduction * \param data the buffer holds the reduction result * \param key the key of the data */ inline void HandleReduceFinish(Tensor<cpu, 2, DType> data, int key) { if (custom_server != NULL) { custom_server->Update(key, data.dptr_, data.MSize()); if (update_on_server != 0) { PushEntry &e = push_map.GetRef(key); this->PullReady(e.weight, key); } else { CHECK_NE(test_on_server, 0) << "test mode"; this->PullReady(data, key); } } else { this->PullReady(data, key); } } virtual void InitCustomerServer(void) { if (update_on_server != 0 || test_on_server != 0) { custom_server = CreateModelUpdater<DType>(); for (size_t j = 0; j < cfgvec.size(); ++j) { custom_server->SetParam(cfgvec[j].first.c_str(), cfgvec[j].second.c_str()); } custom_server->InitUpdater(0, 0, NULL); } } protected: // customized server IModelUpdater<DType> *custom_server; // whether use fifo push queue int use_fifo_push_queue; // perform sum reduction inline void ReduceSum(Tensor<cpu, 3, DType> data) { #if defined(_OPENMP) if (data[0].MSize() >= bigarray_bound && nthread_reduction != 0) { ms_omp_uint ntask = static_cast<ms_omp_uint>(data.size(1)); #pragma omp parallel for schedule(static) num_threads(nthread_reduction) for (ms_omp_uint j = 0; j < ntask; ++j) { for (index_t i = 1; i < data.size(0); ++i) { data[0][j] += data[i][j]; } } } else //NOLINT(*) #endif { for (index_t i = 1; i < data.size(0); ++i) { data[0] += data[i]; } } } private: /*! \brief task running */ struct PullTask { /*! \brief the task data source */ Tensor<xpu, 2, DType> data; /*! \brief the key to the tensor */ int key; /*! * \brief the device id, (key,devid), * uniquely identifies a mem location */ int devid; PullTask(void) {} PullTask(Tensor<xpu, 2, DType> data, int key, int devid) : data(data), key(key), devid(devid) {} }; /*! 
   \brief data structure to hold temporal push result */
  struct PushEntry {
    // temporal space to hold input data
    Tensor<cpu, 4, DType> data;
    // temporal space to hold weight, if needed
    Tensor<cpu, 2, DType> weight;
    // indicates, per device, whether its data has been copied in
    std::vector<bool> copied;
    // number of data copied in
    int num_copied;
    // version number of data used to hold incoming data in push
    int copyin_version;
    // use pinned memory
    bool pin_memory;
    // constructor
    PushEntry(void) : copyin_version(0) {
      weight.dptr_ = NULL;
    }
    ~PushEntry(void) {
      if (data.dptr_ != NULL) {
        if (pin_memory) {
          mshadow::FreeHost<xpu>(&data);
          if (weight.dptr_ != NULL) {
            mshadow::FreeHost<xpu>(&weight);
          }
        } else {
          mshadow::FreeSpace(&data);
          if (weight.dptr_ != NULL) {
            mshadow::FreeSpace(&weight);
          }
        }
      }
    }
    // allocate the space of the entry
    inline void Init(int ndevice, Shape<2> shape,
                     bool pin_memory, bool need_weight) {
      this->pin_memory = pin_memory;
      data.shape_ = Shape4(2, ndevice, shape[0], shape[1]);
      weight.shape_ = shape;
      if (pin_memory) {
        mshadow::AllocHost<xpu>(&data);
        if (need_weight) mshadow::AllocHost<xpu>(&weight);
      } else {
        mshadow::AllocSpace(&data, false);
        if (need_weight) mshadow::AllocSpace(&weight);
      }
      CHECK_EQ(data.CheckContiguous(), true) << "Data must be contiguous";
      CHECK(!need_weight || weight.CheckContiguous()) << "Weight must be contiguous";
      num_copied = 0;
      copied.resize(ndevice, false);
    }
  };
  // a record to remember things related to pull request
  struct PullReqRecord {
    // whether pull is ready to go
    bool ready;
    // whether this record contains a pending request waiting for pull ready
    bool pending;
    // the destination to pull data into
    Tensor<xpu, 2, DType> dest;
    // the priority of the request
    int priority;
    // callback function
    CallbackFunction *callback;
    // argument for callback
    void *callback_arg;
    PullReqRecord(void) : ready(false), pending(false) {
    }
  };
  // a record to help handle pullwait
  struct PullWaitRecord {
    // number of threads that wait for the request to finish
    int nwait;
    // whether the request has finished
    bool finished;
    PullWaitRecord(void)
        : nwait(0), finished(true) {
      // set finished to true so pull without pull request returns
    }
  };
  /*!
   \brief data structure to hold pull request */
  struct PullEntry {
    // data to be pulled back
    Tensor<cpu, 2, DType> src;
    // pull request record of each device
    std::vector<PullReqRecord> req;
    // whether there are threads waiting on this event
    std::vector<PullWaitRecord> wait;
    PullEntry(void) {
    }
  };
  // signal to notify all the threads about class destruction
  bool destroy_signal;
  // vector of devices
  std::vector<int> devices;
  // device index to local index
  std::vector<int> dev2index;
  //----- data structure used to support push ----
  // stream used by push thread on each device for memcpy
  std::vector<Stream<xpu>*> push_stream;
  // the queue used for push task
  std::vector<utils::ThreadPQueue<PullTask> > push_queues;
  // thread to handle push task
  std::vector<utils::Thread> thread_push_handler;
  // lock to protect push fields
  utils::Mutex push_lock;
  // the map of push buffer
  utils::ThreadSafeMap<PushEntry> push_map;
  // customized local reduction operation
  std::map<int, LocalOp> push_operation;
  //----- data structure used to support pull ----
  // the queue used for pull task
  std::vector<utils::ThreadPQueue<std::pair<int, int> > > pull_queues;
  // stream used by pull thread on each device for memcpy
  std::vector<Stream<xpu>*> pull_stream;
  // the map to store pull status
  utils::ThreadSafeMap<PullEntry> pull_map;
  // thread to handle pull task
  std::vector<utils::Thread> thread_pull_handler;
  // lock to protect request fields
  utils::Mutex request_lock;
  // lock to protect wait fields
  utils::Mutex wait_lock;
  // condition variable used for waiting
  utils::ConditionVariable wait_cond;
  // ---------configurations of server-------
  // whether Init has finished
  int init_end;
  // whether to perform update on server side
  int update_on_server;
  // debug option
  int test_on_server;
  // whether to use pinned memory
  int use_pin_memory;
  // number of reduction threads
  int nthread_reduction;
  // the threshold for big array
  size_t bigarray_bound;
  // whether to use one pull thread per device
  int perdev_pull_thread;
  // whether to use one push thread per device
  int perdev_push_thread;
  /*!
   \brief history of configurations */
  std::vector< std::pair<std::string, std::string> > cfgvec;
  // push handler
  inline void PushProc(utils::ThreadPQueue<PullTask> *queue) {
    while (!destroy_signal) {
      PullTask tsk;
      if (queue->Pop(&tsk)) {
        const int wid = GetWorkIndex(tsk.devid);
        PushEntry &e = push_map.GetRef(tsk.key);
        CHECK_EQ(e.data[0][0].shape_, tsk.data.shape_)
            << "Tensor with same key must share same shape "
            << e.data[0][0].shape_ << " vs " << tsk.data.shape_;
        CHECK_EQ(!e.copied[wid], true) << "data inconsistency";
        // start copy
        SetDevice<xpu>(tsk.devid);
        Copy(e.data[e.copyin_version][wid], tsk.data, push_stream[wid]);
        // wait till the copy finishes
        push_stream[wid]->Wait();
        // mark copied
        e.copied[wid] = true;
        push_lock.Lock();
        e.num_copied += 1;
        int cp_version = e.copyin_version;
        bool push_finish = e.num_copied >= static_cast<int>(devices.size());
        if (push_finish) {
          // switch version
          e.copyin_version = (e.copyin_version + 1) % e.data.size(0);
          std::fill(e.copied.begin(), e.copied.end(), false);
          e.num_copied = 0;
        }
        push_lock.Unlock();
        if (push_finish) {
          this->HandlePushFinish(e.data[cp_version], tsk.key);
        }
      } else {
        CHECK_EQ(destroy_signal, true) << "abort but not destroy";
      }
    }
  }
  inline void PushHandlerGlobal(void) {
    // allocate stream resources
    for (size_t i = 0; i < devices.size(); ++i) {
      SetDevice<xpu>(devices[i]);
      push_stream[i] = NewStream<xpu>(devices[i]);
    }
    this->PushProc(&push_queues[0]);
    // free resources
    for (size_t i = 0; i < devices.size(); ++i) {
      SetDevice<xpu>(devices[i]);
      DeleteStream(push_stream[i]);
    }
  }
  inline void PushHandlerLocal(size_t tid) {
    CHECK_LT(tid, devices.size()) << "threadid exceed boundary";
    CHECK_EQ(push_queues.size(), devices.size())
        << "must have one push_queue per device";
    // allocate stream resources
    SetDevice<xpu>(devices[tid]);
    push_stream[tid] = NewStream<xpu>(devices[tid]);
    this->PushProc(&push_queues[tid]);
    SetDevice<xpu>(devices[tid]);
    DeleteStream(push_stream[tid]);
  }
  /*!\brief entry point of push thread, one thread for all devices */
  inline static MSHADOW_THREAD_PREFIX PushGlobalThread(void *pthread) {
    static_cast<LocalModel*>(pthread)->PushHandlerGlobal();
    utils::ThreadExit(NULL);
    return NULL;
  }
  /*!\brief entry point of push thread, one thread per device */
  inline static MSHADOW_THREAD_PREFIX PushLocalThread(void *arg) {
    std::pair<LocalModel*, size_t> *p =
        static_cast<std::pair<LocalModel*, size_t>*>(arg);
    p->first->PushHandlerLocal(p->second);
    delete p;
    return NULL;
  }
  // pull handler procedure
  inline void PullProc(utils::ThreadPQueue<std::pair<int, int> > *queue) {
    while (!destroy_signal) {
      std::pair<int, int> tsk;
      if (queue->Pop(&tsk)) {
        const int key = tsk.first;
        const int devid = tsk.second;
        const int wid = GetWorkIndex(devid);
        PullEntry &e = pull_map.GetRef(key);
        {
          // handle request
          CHECK_EQ(e.req.size(), devices.size())
              << "PullHandler: must initialize the key, req";
          PullReqRecord &r = e.req[wid];
          SetDevice<xpu>(devid);
          Copy(r.dest, e.src, pull_stream[wid]);
          // callback, if any
          if (r.callback != NULL) {
            (*r.callback)(pull_stream[wid], r.callback_arg);
          }
          // wait till the operation finishes
          pull_stream[wid]->Wait();
        }
        {
          // wake up waiters if any
          CHECK_EQ(e.wait.size(), devices.size())
              << "PullHandler: must initialize the key, wait";
          PullWaitRecord &w = e.wait[wid];
          wait_lock.Lock();
          w.finished = true;
          if (w.nwait != 0) {
            wait_cond.Broadcast();
          }
          wait_lock.Unlock();
        }
      } else {
        CHECK_EQ(destroy_signal, true) << "abort but not destroy";
      }
    }
  }
  // use one thread for all pull actions
  inline void PullHandlerGlobal(void) {
    // allocate stream resources
    for (size_t i = 0; i < devices.size(); ++i) {
      SetDevice<xpu>(devices[i]);
pull_stream[i] = NewStream<xpu>(devices[i]); } this->PullProc(&pull_queues[0]); // free resources for (size_t i = 0; i < devices.size(); ++i) { SetDevice<xpu>(devices[i]); DeleteStream(pull_stream[i]); } } inline void PullHandlerLocal(size_t tid) { CHECK_LT(tid, devices.size()) << "threadid exceed boundary"; CHECK_EQ(pull_queues.size(), devices.size()) << "must have one pull_queue per device"; // allocate stream resources SetDevice<xpu>(devices[tid]); pull_stream[tid] = NewStream<xpu>(devices[tid]); this->PullProc(&pull_queues[tid]); SetDevice<xpu>(devices[tid]); DeleteStream(pull_stream[tid]); } /*!\brief entry point of pull thread, one thread for all devices */ inline static MSHADOW_THREAD_PREFIX PullGlobalThread(void *arg) { static_cast<LocalModel*>(arg)->PullHandlerGlobal(); return NULL; } inline static MSHADOW_THREAD_PREFIX PullLocalThread(void *arg) { std::pair<LocalModel*, size_t> *p = static_cast<std::pair<LocalModel*, size_t>*>(arg); p->first->PullHandlerLocal(p->second); delete p; return NULL; } // get internal index of device inline int GetWorkIndex(int devid) const { CHECK(devid >= 0 && devid < static_cast<int>(dev2index.size()) && dev2index[devid] >= 0) << "Push: invalid devid"; return dev2index[devid]; } // functions to handle pull inline void InitPullMap(int key) { pull_map.Init(key); PullEntry &e = pull_map.GetRef(key); request_lock.Lock(); // must recheck after lock if (e.req.size() == 0) { e.req.resize(devices.size(), PullReqRecord()); } request_lock.Unlock(); // check wait map wait_lock.Lock(); // must recheck after lock if (e.wait.size() == 0) { e.wait.resize(devices.size(), PullWaitRecord()); } wait_lock.Unlock(); } // functions to handle pull inline void InitPushMap(int key, Shape<2> shape) { push_map.Init(key); PushEntry &e = push_map.GetRef(key); push_lock.Lock(); if (e.copied.size() == 0) { e.Init(devices.size(), shape, use_pin_memory != 0, update_on_server != 0 || test_on_server != 0); } this->ServerInitKey(e.weight, key); push_lock.Unlock(); } }; } // namespace ps } // namespace mshadow #endif // MSHADOW_PS_LOCAL_INL_H_ NOLINT(*)
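// --- Editor's note: an illustrative, self-contained sketch (not part of the
// original header) of the reduction strategy in LocalModel::ReduceSum above:
// stay serial for small tensors and only fan out OpenMP threads once the
// payload crosses bigarray_bound, since thread startup can dominate on small
// arrays. All names below are hypothetical.
//
//   #include <cstddef>
//   // Sum rows 1..ndev-1 of a row-major (ndev x len) buffer into row 0.
//   void reduce_rows(float *data, int ndev, std::size_t len,
//                    std::size_t bigarray_bound, int nthreads) {
//     if (len >= bigarray_bound && nthreads != 0) {
//       #pragma omp parallel for schedule(static) num_threads(nthreads)
//       for (long j = 0; j < static_cast<long>(len); ++j)
//         for (int i = 1; i < ndev; ++i)
//           data[j] += data[static_cast<std::size_t>(i) * len + j];
//     } else {
//       for (int i = 1; i < ndev; ++i)
//         for (std::size_t j = 0; j < len; ++j)
//           data[j] += data[static_cast<std::size_t>(i) * len + j];
//     }
//   }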
timing_override.h
#ifndef _BENCH_CHOLESKY_TIMING_OVR_
#define _BENCH_CHOLESKY_TIMING_OVR_

#include "timing.h"

#ifdef USE_TIMING

#define wait(req) wait_impl(req, &__timing[THREAD_NUM].ts[__timer])
#define waitall(req, nreq) waitall_impl(req, nreq, &__timing[THREAD_NUM].ts[__timer])

static void wait_impl(MPI_Request *comm_req, double *timer)
{
    int comm_comp = 0;

    MPI_Test(comm_req, &comm_comp, MPI_STATUS_IGNORE);
    while (!comm_comp) {
        double yield_time = timestamp();
        #pragma omp taskyield
        *timer -= timestamp() - yield_time;
        MPI_Test(comm_req, &comm_comp, MPI_STATUS_IGNORE);
    }
}

static void waitall_impl(MPI_Request *comm_req, int nreq, double *timer)
{
    int comm_comp = 0;

    /* MPI_Testall takes an array of requests, so the statuses argument must
     * be MPI_STATUSES_IGNORE (the array form), not MPI_STATUS_IGNORE */
    MPI_Testall(nreq, comm_req, &comm_comp, MPI_STATUSES_IGNORE);
    while (!comm_comp) {
        double yield_time = timestamp();
        #pragma omp taskyield
        *timer -= timestamp() - yield_time;
        MPI_Testall(nreq, comm_req, &comm_comp, MPI_STATUSES_IGNORE);
    }
}

#endif

#endif // _BENCH_CHOLESKY_TIMING_OVR_
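/* --- Editor's note: an illustrative sketch (not part of the original header)
 * of how the wait()/waitall() overrides above are meant to be used. Inside an
 * OpenMP task a blocking MPI_Wait would stall the whole worker thread; the
 * MPI_Test + taskyield loop instead lets the runtime schedule other ready
 * tasks while the communication completes. Names here are hypothetical.
 *
 *   #pragma omp task depend(out: block)
 *   {
 *       MPI_Request req;
 *       MPI_Irecv(block, n, MPI_DOUBLE, src, tag, MPI_COMM_WORLD, &req);
 *       wait(&req);   // polls MPI_Test, yielding to other tasks in between
 *       process_block(block);
 *   }
 */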
loop_dispatch.c
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s // REQUIRES: ompt // XFAIL: gcc // GCC doesn't call runtime for static schedule #include "callback.h" #define WORK_SIZE 64 int main() { int i; int wait_s = 0; #pragma omp parallel num_threads(4) { int wait_id = 0; int team_size = omp_get_num_threads(); #pragma omp for schedule(static, WORK_SIZE / 4) for (i = 0; i < WORK_SIZE; i++) {} #pragma omp for schedule(dynamic) for (i = 0; i < WORK_SIZE; i++) { if (wait_id == 0) { // Wait until every thread has at least one iteration assigned OMPT_SIGNAL(wait_s); OMPT_WAIT(wait_s, team_size); wait_id++; } } #pragma omp for schedule(guided) for (i = 0; i < WORK_SIZE; i++) { if (wait_id == 1) { // Wait until every thread has at least one iteration assigned OMPT_SIGNAL(wait_s); OMPT_WAIT(wait_s, 2 * team_size); wait_id++; } } } return 0; } // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_begin' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_implicit_task' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_work' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_dispatch' // CHECK: 0: NULL_POINTER=[[NULL:.*$]] // CHECK: {{^}}[[THREAD_ID0:[0-9]+]]: ompt_event_parallel_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]] // Each thread should have at least one ws-loop-chunk-begin event for each // for loop. // CHECK: {{^}}[[THREAD_ID0]]: ompt_event_implicit_task_begin: // CHECK-SAME: task_id=[[TASK_ID0:[0-9]+]] // CHECK: {{^}}[[THREAD_ID0]]: ompt_event_loop_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID0]] // CHECK: {{^}}[[THREAD_ID0]]: ompt_event_ws_loop_chunk_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID0]] // CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations=16 // CHECK: {{^}}[[THREAD_ID0]]: ompt_event_loop_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID0]] // CHECK: {{^}}[[THREAD_ID0]]: ompt_event_ws_loop_chunk_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID0]] // CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations=1 // CHECK: {{^}}[[THREAD_ID0]]: ompt_event_loop_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID0]] // CHECK: {{^}}[[THREAD_ID0]]: ompt_event_ws_loop_chunk_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID0]] // CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations={{[1-9][0-9]*}} // CHECK: {{^}}[[THREAD_ID1:[0-9]+]]: ompt_event_implicit_task_begin: // CHECK-SAME: task_id=[[TASK_ID1:[0-9]+]] // CHECK: {{^}}[[THREAD_ID1]]: ompt_event_loop_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID1]] // CHECK: {{^}}[[THREAD_ID1]]: ompt_event_ws_loop_chunk_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID1]] // CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations=16 // CHECK: {{^}}[[THREAD_ID1]]: ompt_event_loop_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID1]] // CHECK: {{^}}[[THREAD_ID1]]: ompt_event_ws_loop_chunk_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID1]] // CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations=1 // CHECK: {{^}}[[THREAD_ID1]]: ompt_event_loop_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID1]] // CHECK: {{^}}[[THREAD_ID1]]: ompt_event_ws_loop_chunk_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID1]] // CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations={{[1-9][0-9]*}} // CHECK: 
{{^}}[[THREAD_ID2:[0-9]+]]: ompt_event_implicit_task_begin: // CHECK-SAME: task_id=[[TASK_ID2:[0-9]+]] // CHECK: {{^}}[[THREAD_ID2]]: ompt_event_loop_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID2]] // CHECK: {{^}}[[THREAD_ID2]]: ompt_event_ws_loop_chunk_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID2]] // CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations=16 // CHECK: {{^}}[[THREAD_ID2]]: ompt_event_loop_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID2]] // CHECK: {{^}}[[THREAD_ID2]]: ompt_event_ws_loop_chunk_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID2]] // CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations=1 // CHECK: {{^}}[[THREAD_ID2]]: ompt_event_loop_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID2]] // CHECK: {{^}}[[THREAD_ID2]]: ompt_event_ws_loop_chunk_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID2]] // CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations={{[1-9][0-9]*}} // CHECK: {{^}}[[THREAD_ID3:[0-9]+]]: ompt_event_implicit_task_begin: // CHECK-SAME: task_id=[[TASK_ID3:[0-9]+]] // CHECK: {{^}}[[THREAD_ID3]]: ompt_event_loop_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID3]] // CHECK: {{^}}[[THREAD_ID3]]: ompt_event_ws_loop_chunk_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID3]] // CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations=16 // CHECK: {{^}}[[THREAD_ID3]]: ompt_event_loop_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID3]] // CHECK: {{^}}[[THREAD_ID3]]: ompt_event_ws_loop_chunk_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID3]] // CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations=1 // CHECK: {{^}}[[THREAD_ID3]]: ompt_event_loop_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID3]] // CHECK: {{^}}[[THREAD_ID3]]: ompt_event_ws_loop_chunk_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID3]] // CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations={{[1-9][0-9]*}}
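// --- Editor's note (illustration, not part of the test): why the static
// chunk is 16. With schedule(static, WORK_SIZE / 4) = schedule(static, 16),
// 64 iterations over 4 threads means chunks are handed out round-robin and
// each thread receives exactly one 16-iteration chunk, matching the
// chunk_iterations=16 lines above. A standalone check:
//
//   #include <omp.h>
//   #include <stdio.h>
//   int main(void) {
//   #pragma omp parallel num_threads(4)
//     {
//       int lo = 64, hi = -1, i;
//   #pragma omp for schedule(static, 16)
//       for (i = 0; i < 64; i++) {
//         if (i < lo) lo = i;
//         if (i > hi) hi = i;
//       }
//       printf("thread %d got [%d, %d]\n", omp_get_thread_num(), lo, hi);
//     }
//     return 0;
//   }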
sieve.c
#include "sieve.h" #include <omp.h> #include <stdlib.h> #include "magic.h" #include "segment.h" #include "utils.h" #define MAX_UPPER 18446744073709551600U /* (2^64 - 1) / 30 * 30 */ typedef struct kit { u8 *magic; u32 magic_size; u32 *primes; u32 primes_size; } kit; static const u32 pi[30] = { 0, 0, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 10 }; static const u32 small[10] = { 2, 3, 5, 7, 11, 13, 17, 19, 23, 29 }; static void enlist_prime(void *ctx, u64 prime) { kit *k; k = (kit *)ctx; k->primes[k->primes_size++] = prime; } static kit *kit_new(u32 upper, u32 segment_size) { kit *k; segment *s; u32 sqrt_upper; u32 limit; u32 end; u32 i; k = ez_malloc(sizeof(*k)); k->magic = magic_new(&k->magic_size); k->primes = ez_malloc((pi_upper(upper) + 7) * sizeof(*k->primes)); k->primes_size = 0; sqrt_upper = isqrt(upper); /* Non-segmented bootstrap sieve. */ s = segment_bootstrap(sqrt_upper, k->magic, k->magic_size); segment_extract(s, enlist_prime, k); segment_free(s); s = segment_new(segment_size); end = (upper - 1) / 30 + 1; limit = (end < segment_size) ? 0 : end - (segment_size - 1); /* Starts segmented sieving from where the bootstrap sieve left off. */ for (i = (sqrt_upper - 1) / 30 + 1; i < limit; i += segment_size) { segment_init(s, k->magic, k->magic_size, i, i + segment_size); segment_sieve(s, k->primes, k->primes_size); segment_extract(s, enlist_prime, k); } if (i < end) { segment_init(s, k->magic, k->magic_size, i, end); segment_sieve(s, k->primes, k->primes_size); segment_extract(s, enlist_prime, k); } segment_free(s); k->primes = ez_realloc(k->primes, k->primes_size * sizeof(*k->primes)); return k; } static void kit_free(kit *k) { magic_free(k->magic); free(k->primes); free(k); } /* lower >= 30, upper <= MAX_UPPER */ static u64 sieve_count_range(kit *k, u64 lower, u64 upper, u32 segment_size) { segment *s; u64 result; u64 start; u64 end; u64 limit; u64 i; result = 0; start = lower / 30; end = (upper - 1) / 30 + 1; limit = (end < segment_size) ? 0 : end - (segment_size - 1); s = segment_new(segment_size); segment_init(s, k->magic, k->magic_size, start, MIN(start + segment_size, end)); segment_sieve(s, k->primes, k->primes_size); segment_trim_lower(s, lower); for (i = start + segment_size; i < limit; i += segment_size) { result += segment_count(s); segment_init(s, k->magic, k->magic_size, i, i + segment_size); segment_sieve(s, k->primes, k->primes_size); } if (i < end) { result += segment_count(s); segment_init(s, k->magic, k->magic_size, i, end); segment_sieve(s, k->primes, k->primes_size); } segment_trim_upper(s, upper); result += segment_count(s); segment_free(s); return result; } /* lower >= 30, upper <= MAX_UPPER */ static void sieve_generate_range(kit *k, u64 lower, u64 upper, u32 segment_size, callback cb, void *ctx) { segment *s; u64 start; u64 end; u64 limit; u64 i; start = lower / 30; end = (upper - 1) / 30 + 1; limit = (end < segment_size) ? 
0 : end - (segment_size - 1); s = segment_new(segment_size); segment_init(s, k->magic, k->magic_size, start, MIN(start + segment_size, end)); segment_sieve(s, k->primes, k->primes_size); segment_trim_lower(s, lower); for (i = start + segment_size; i < limit; i += segment_size) { segment_extract(s, cb, ctx); segment_init(s, k->magic, k->magic_size, i, i + segment_size); segment_sieve(s, k->primes, k->primes_size); } if (i < end) { segment_extract(s, cb, ctx); segment_init(s, k->magic, k->magic_size, i, end); segment_sieve(s, k->primes, k->primes_size); } segment_trim_upper(s, upper); segment_extract(s, cb, ctx); segment_free(s); } u64 sieve_count(u64 lower, u64 upper, u32 segment_size, u32 max_threads) { kit *k; u64 result; u64 range; u64 segment; u64 segments; u32 threads; u64 interval; u32 remainder; u64 offset; u64 start; u64 end; u32 i; upper = MIN(upper, MAX_UPPER); if (upper < lower) return 0; if (upper < 30) return pi[upper] - pi[MAX(lower, 1) - 1]; if (lower < 30) { result = 10 - pi[MAX(lower, 1) - 1]; lower = 30; } else result = 0; k = kit_new(isqrt(upper), segment_size); range = upper - lower; segment = segment_size * 30; segments = range / segment + (range % segment != 0); threads = omp_get_num_procs(); threads = MIN(threads, segments); threads = MIN(threads, max_threads); threads = MAX(threads, 1); omp_set_num_threads(threads); interval = range / threads; remainder = range % threads; offset = lower + (interval + 1) * remainder; #pragma omp parallel for reduction(+: result) private(start, end) for (i = 0; i < threads; ++i) { if (i < remainder) { start = lower + (interval + 1) * i; end = start + (interval + 1) - 1; } else { start = offset + interval * (i - remainder); end = (start + interval == upper) ? upper : start + interval - 1; } result += sieve_count_range(k, start, end, segment_size); } kit_free(k); return result; } void sieve_generate(u64 lower, u64 upper, u32 segment_size, callback cb, void *ctx) { kit *k; u32 i; upper = MIN(upper, MAX_UPPER); if (upper < lower) return; if (lower < 30) { for (i = 0; i < 10; ++i) { if (upper < small[i]) return; if (lower <= small[i]) cb(ctx, small[i]); } lower = 30; } k = kit_new(isqrt(upper), segment_size); sieve_generate_range(k, lower, upper, segment_size, cb, ctx); kit_free(k); }
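/* --- Editor's note: an illustrative sketch (not part of the original file) of
 * the thread partitioning used by sieve_count() above. A range of length
 * `range` is split over `threads` workers so that the first range % threads
 * workers take one extra unit, keeping all block sizes within 1 of each other:
 *
 *   u64 interval  = range / threads;   // base block size
 *   u32 remainder = range % threads;   // number of threads that get +1
 *   // thread i <  remainder: start = lower + (interval + 1) * i
 *   // thread i >= remainder: start = offset + interval * (i - remainder),
 *   //                        where offset = lower + (interval + 1) * remainder
 *
 * For example, range = 10 and threads = 4 gives blocks of 3, 3, 2, 2.
 */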
convolution_winograd_transform.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd63_transform_output_sse(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt) { const int outw = top_blob.w; const int outh = top_blob.h; const int outch = top_blob.c; const int w_tiles = outw / 6; const int h_tiles = outh / 6; const int tiles = w_tiles * h_tiles; const float* biasptr = bias; // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob.channel(p); const float bias0 = biasptr ? 
biasptr[p] : 0.f; float tmp[6][8]; // tile for (int i = 0; i < h_tiles; i++) { for (int j = 0; j < w_tiles; j++) { const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 1; const float* output0_tm_1 = output0_tm_0 + tiles * 1; const float* output0_tm_2 = output0_tm_0 + tiles * 2; const float* output0_tm_3 = output0_tm_0 + tiles * 3; const float* output0_tm_4 = output0_tm_0 + tiles * 4; const float* output0_tm_5 = output0_tm_0 + tiles * 5; const float* output0_tm_6 = output0_tm_0 + tiles * 6; const float* output0_tm_7 = output0_tm_0 + tiles * 7; // TODO sse optimize for (int m = 0; m < 8; m++) { float tmp024a = output0_tm_1[0] + output0_tm_2[0]; float tmp135a = output0_tm_1[0] - output0_tm_2[0]; float tmp024b = output0_tm_3[0] + output0_tm_4[0]; float tmp135b = output0_tm_3[0] - output0_tm_4[0]; float tmp024c = output0_tm_5[0] + output0_tm_6[0]; float tmp135c = output0_tm_5[0] - output0_tm_6[0]; tmp[0][m] = output0_tm_0[0] + tmp024a + tmp024b + tmp024c * 32; tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; tmp[5][m] = output0_tm_7[0] + tmp135a + tmp135b * 32 + tmp135c; output0_tm_0 += tiles * 8; output0_tm_1 += tiles * 8; output0_tm_2 += tiles * 8; output0_tm_3 += tiles * 8; output0_tm_4 += tiles * 8; output0_tm_5 += tiles * 8; output0_tm_6 += tiles * 8; output0_tm_7 += tiles * 8; } float* output0 = out0.row(i * 6) + j * 6; for (int m = 0; m < 6; m++) { const float* tmp0 = tmp[m]; float tmp024a = tmp0[1] + tmp0[2]; float tmp135a = tmp0[1] - tmp0[2]; float tmp024b = tmp0[3] + tmp0[4]; float tmp135b = tmp0[3] - tmp0[4]; float tmp024c = tmp0[5] + tmp0[6]; float tmp135c = tmp0[5] - tmp0[6]; output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32; output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8; output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c; output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16; output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4; output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c; output0 += outw; } } } } }
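// --- Editor's note: an illustrative scalar reference (not part of the
// original file) of the 1-D F(6,3) output transform that the function above
// applies twice, first along rows into tmp[][] and then along columns. It is
// a direct evaluation of the commented otm[6][8] matrix:
//
//   static void winograd63_output_transform_1d(const float r[8], float o[6])
//   {
//       float a = r[1] + r[2], b = r[1] - r[2];
//       float c = r[3] + r[4], d = r[3] - r[4];
//       float e = r[5] + r[6], f = r[5] - r[6];
//       o[0] = r[0] + a + c + e * 32;
//       o[1] = b + d * 2 + f * 16;
//       o[2] = a + c * 4 + e * 8;
//       o[3] = b + d * 8 + f * 4;
//       o[4] = a + c * 16 + e * 2;
//       o[5] = r[7] + b + d * 32 + f;
//   }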
rg_filter.c
////////////////////////////////////// // Cunren Liang, NASA JPL/Caltech // Copyright 2015-2018... ////////////////////////////////////// #include "resamp.h" #include <fftw3.h> #include <omp.h> int rg_filter(char *inputfile, int nrg, int nout, char **outputfile, float *bw, float *bc, int nfilter, int nfft, float beta, int zero_cf, float offset){ /* inputfile: input file nrg file width nout: number of output files outputfile: (value_of_out_1, value_of_out_2, value_of_out_3...) output files bw: (value_of_out_1, value_of_out_2, value_of_out_3...) filter bandwidth divided by sampling frequency [0, 1] bc: (value_of_out_1, value_of_out_2, value_of_out_3...) filter center frequency divided by sampling frequency nfilter: number samples of the filter (odd). Reference Value: 65 nfft: number of samples of the FFT. Reference Value: 1024 beta: kaiser window beta. Reference Value: 1.0 zero_cf: if bc != 0.0, move center frequency to zero? 0: Yes (Reference Value). 1: No. offset: offset (in samples) of linear phase for moving center frequency. Reference Value: 0.0 */ /////////////////////////////// // int k; // printf("input parameters:"); // printf("%s\n", inputfile); // printf("%d\n", nrg); // printf("%d\n", nout); // for(k =0; k<nout;k++){ // printf("%s\n", outputfile[k]); // printf("%f\n", bw[k]); // printf("%f\n", bc[k]); // } // printf("%d\n", nfilter); // printf("%d\n", nfft); // printf("%f\n", beta); // printf("%d\n", zero_cf); // printf("%f\n", offset); /////////////////////////////// FILE *infp; //slave image to be resampled FILE **outfp; //resampled slave image fcomplex **filter; fcomplex *in; fcomplex **out; fcomplex *tmp; fcomplex *tmp2; fcomplex *tmpf; int *zeroflag; fftwf_plan p_forward; fftwf_plan p_backward; fftwf_plan p_forward_filter; //fftwf_plan p_backward_filter; //int nout; //number of output files //int nrg; //file width int naz; //file length //int nfft; //fft length //int nfilter; //filter length int hnfilter; //float *bw; //float *bc; //float beta; //kaiser window beta //int zero_cf; //float offset; int argc_mand; int nthreads; float sc; //constant to scale the data read in to avoid large values //during fft and ifft float cf_pha; float t; fcomplex cf; int nblock_in; int nblock_out; int num_block; int i_block; int nblock_in_last; int nblock_out_last; int i, j, i_out; /*****************************************************************************/ //nfilter = 65; //nfft = 1024; //beta = 1.0; //zero_cf = 0; //offset = 0.0; sc = 10000.0; /*****************************************************************************/ infp = openfile(inputfile, "rb"); naz = file_length(infp, nrg, sizeof(fcomplex)); printf("file width: %d, file length: %d\n\n", nrg, naz); if(nout < 1){ fprintf(stderr, "there should be at least one output file!\n"); exit(1); } outfp = array1d_FILE(nout); for(i = 0; i < nout; i++){ outfp[i] = openfile(outputfile[i], "wb"); } //check filter length if(nfilter < 3){ fprintf(stderr, "filter length: %d too small!\n", nfilter); exit(1); } if(nfilter % 2 != 1){ fprintf(stderr, "filter length must be odd!\n"); exit(1); } //compute block processing parameters hnfilter = (nfilter - 1) / 2; nblock_in = nfft - nfilter + 1; nblock_in += hnfilter; if (nblock_in <= 0){ fprintf(stderr, "fft length too small compared with filter length!\n"); exit(1); } nblock_out = nblock_in - 2 * hnfilter; num_block = (nrg - 2 * hnfilter) / nblock_out; if((nrg - num_block * nblock_out - 2 * hnfilter) != 0){ num_block += 1; } if((nrg - 2 * hnfilter) <= 0){ num_block = 1; } if(num_block == 1){ 
nblock_out_last = 0; nblock_in_last = nrg; } else{ nblock_out_last = nrg - (num_block - 1) * nblock_out - 2 * hnfilter; nblock_in_last = nblock_out_last + 2 * hnfilter; } //allocate memory filter = array2d_fcomplex(nout, nfft); in = array1d_fcomplex(nrg); out = array2d_fcomplex(nout, nrg); tmp = array1d_fcomplex(nfft); tmp2 = array1d_fcomplex(nfft); tmpf = array1d_fcomplex(nfft); zeroflag = array1d_int(nrg); //as said in the FFTW document, //Typically, the problem will have to involve at least a few thousand data points before threads become beneficial. //so I choose not to use Multi-threaded FFTW, as our FFT size is mostly small. if(0){ ////////////////////////////////////////////////////////////////////////////////////////////////// //Multi-threaded FFTW nthreads = fftwf_init_threads(); if(nthreads == 0){ fprintf(stderr, "WARNING: there is some error in using multi-threaded FFTW.\n"); fprintf(stderr, " therefore it is not used, and computation performance is reduced.\n"); nthreads = 1; } else{ //int this_thread = omp_get_thread_num(), num_threads = omp_get_num_threads(); //nthreads = omp_get_num_threads(); nthreads = omp_get_max_threads(); } printf("FFTW is using %d threads\n", nthreads); //this works for all the following plans if(nthreads != 1) //actually it is OK to pass nthreads=1, in this case, threads are disabled. fftwf_plan_with_nthreads(nthreads); ////////////////////////////////////////////////////////////////////////////////////////////////// } //create plans before initializing data, because FFTW_MEASURE overwrites the in/out arrays. p_forward = fftwf_plan_dft_1d(nfft, (fftwf_complex*)tmp, (fftwf_complex*)tmp, FFTW_FORWARD, FFTW_MEASURE); p_backward = fftwf_plan_dft_1d(nfft, (fftwf_complex*)tmp2, (fftwf_complex*)tmp2, FFTW_BACKWARD, FFTW_MEASURE); p_forward_filter = fftwf_plan_dft_1d(nfft, (fftwf_complex*)tmpf, (fftwf_complex*)tmpf, FFTW_FORWARD, FFTW_ESTIMATE); //computing filters for(i = 0; i < nout; i++){ bandpass_filter(bw[i], bc[i], nfilter, nfft, (nfilter-1)/2, beta, tmpf); //relationship of nr and matlab fft //nr fft matlab fft // 1 <==> ifft()*nfft // -1 <==> fft() //four1((float *)filter - 1, nfft, -1); fftwf_execute(p_forward_filter); for(j = 0; j < nfft; j++){ filter[i][j].re = tmpf[j].re; filter[i][j].im = tmpf[j].im; } } fftwf_destroy_plan(p_forward_filter); //process data for(i = 0; i < naz; i++){ //progress report if((i + 1) % 1000 == 0 || (i + 1) == naz) fprintf(stderr,"processing line: %6d of %6d\r", i+1, naz); if((i + 1) == naz) fprintf(stderr,"\n\n"); //read data readdata((fcomplex *)in, (size_t)nrg * sizeof(fcomplex), infp); #pragma omp parallel for private(j) shared(nrg,in, zeroflag, sc) for(j = 0; j < nrg; j++){ if(in[j].re != 0.0 || in[j].im != 0.0){ zeroflag[j] = 1; in[j].re *= 1.0 / sc; in[j].im *= 1.0 / sc; } else{ zeroflag[j] = 0; } } //process each block for(i_block = 0; i_block < num_block; i_block++){ //zero out //for(j = 0; j < nfft; j++){ // tmp[j].re = 0.0; // tmp[j].im = 0.0; //} memset((void *)tmp, 0, (size_t)nfft*sizeof(fcomplex)); //get data if(num_block == 1){ for(j = 0; j < nrg; j++){ tmp[j] = in[j]; } } else{ if(i_block == num_block - 1){ for(j = 0; j < nblock_in_last; j++){ tmp[j] = in[j+nblock_out*i_block]; } } else{ for(j = 0; j < nblock_in; j++){ tmp[j] = in[j+nblock_out*i_block]; } } } //four1((float *)tmp - 1, nfft, -1); //tested, the same as above fftwf_execute(p_forward); //process each output file for(i_out = 0; i_out < nout; i_out++){ //looks like this makes it slower, so comment out //#pragma omp parallel for private(j) 
shared(nfft, tmp2, filter, i_out, tmp) for(j = 0; j < nfft; j++) tmp2[j] = cmul(filter[i_out][j], tmp[j]); //four1((float *)tmp2 - 1, nfft, 1); //tested, the same as above fftwf_execute(p_backward); //get data if(num_block == 1){ for(j = 0; j < nrg; j++){ out[i_out][j] = tmp2[j]; } } else{ if(i_block == 0){ for(j = 0; j < hnfilter + nblock_out; j++){ out[i_out][j] = tmp2[j]; } } else if(i_block == num_block - 1){ for(j = 0; j < hnfilter + nblock_out_last; j++){ out[i_out][nrg - 1 - j] = tmp2[nblock_in_last - 1 - j]; } } else{ for(j = 0; j < nblock_out; j++){ out[i_out][j + hnfilter + i_block * nblock_out] = tmp2[j + hnfilter]; } } }//end of getting data }//end of processing each output file }//end of processing each block //move center frequency if(zero_cf == 0){ //process each output file //looks like this makes it slower, so comment out //#pragma omp parallel for private(i_out, j, t, cf_pha, cf) shared(nout, bc, nrg, offset, out) for(i_out = 0; i_out < nout; i_out++){ if(bc[i_out] != 0){ #pragma omp parallel for private(j, t, cf_pha, cf) shared(nrg, offset, bc, i_out, out) for(j = 0; j < nrg; j++){ //t = j - (nrg - 1.0) / 2.0; //make 0 index exactly at range center t = j + offset; //make 0 index exactly at range center cf_pha = 2.0 * PI * (-bc[i_out]) * t; cf.re = cos(cf_pha); cf.im = sin(cf_pha); out[i_out][j] = cmul(out[i_out][j], cf); } } } } //scale back and write data //process each output file for(i_out = 0; i_out < nout; i_out++){ //scale back #pragma omp parallel for private(j) shared(nrg, zeroflag, out, i_out, sc, nfft) for(j = 0; j < nrg; j++){ if(zeroflag[j] == 0){ out[i_out][j].re = 0.0; out[i_out][j].im = 0.0; } else{ out[i_out][j].re *= sc / nfft; out[i_out][j].im *= sc / nfft; } } //write data writedata((fcomplex *)out[i_out], nrg * sizeof(fcomplex), outfp[i_out]); } }//end of processing data fftwf_destroy_plan(p_forward); fftwf_destroy_plan(p_backward); free_array2d_fcomplex(filter); free_array1d_fcomplex(in); free_array2d_fcomplex(out); free_array1d_fcomplex(tmp); free_array1d_fcomplex(tmp2); free_array1d_fcomplex(tmpf); free_array1d_int(zeroflag); //free_array1d_float(bw); //free_array1d_float(bc); fclose(infp); for(i_out = 0; i_out < nout; i_out++) fclose(outfp[i_out]); //free_array1d_FILE(outfp); return 0; }//end main()
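/* --- Editor's note: an illustrative sketch (not part of the original file) of
 * the overlap-discard bookkeeping used above. A length-nfft FFT block
 * convolved with an nfilter-tap filter yields nfft - nfilter + 1 fully valid
 * samples; the code keeps hnfilter = (nfilter - 1) / 2 extra samples on one
 * side and discards hnfilter edge samples of every interior block:
 *
 *   int hnfilter   = (nfilter - 1) / 2;
 *   int nblock_in  = nfft - nfilter + 1 + hnfilter;  // samples read per block
 *   int nblock_out = nblock_in - 2 * hnfilter;       // samples kept per block
 *
 * With the reference values nfft = 1024 and nfilter = 65: hnfilter = 32,
 * nblock_in = 992 and nblock_out = 928, so consecutive blocks advance by 928
 * samples and overlap by 2 * hnfilter = 64.
 */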
postCallFlowIssue.c
int g1;

static void foo();

int main() {
#pragma omp parallel
  {
    int i = 10;
    if (i < 5) {
      g1 = 5;
      foo();
      foo();
      i = i + 1;
    }
  }
}

static void foo() {
  g1 = g1 + 1;
}
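// Added note (hedged): i is set to 10 immediately before the test, so the
// (i < 5) branch is statically dead. The point of this test input appears to
// be whether a post-call flow analysis still tracks g1 correctly across the
// two foo() calls, since foo() mutates the global g1.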
target_teams_distribute_parallel_for_simd_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s -Wuninitialized // expected-error@+1 {{unexpected OpenMP directive '#pragma omp target teams distribute parallel for simd'}} #pragma omp target teams distribute parallel for simd // expected-error@+1 {{unexpected OpenMP directive '#pragma omp target teams distribute parallel for simd'}} #pragma omp target teams distribute parallel for simd foo void test_no_clause() { int i; #pragma omp target teams distribute parallel for simd for (i = 0; i < 16; ++i) ; // expected-error@+2 {{statement after '#pragma omp target teams distribute parallel for simd' must be a for loop}} #pragma omp target teams distribute parallel for simd ++i; } void test_branch_protected_scope() { int i = 0; L1: ++i; int x[24]; #pragma omp target teams distribute parallel for simd for (i = 0; i < 16; ++i) { if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L2; else if (i == 8) { L2: x[i]++; } } if (x[0] == 0) goto L2; // expected-error {{use of undeclared label 'L2'}} else if (x[1] == 1) goto L1; } void test_invalid_clause() { int i; // expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute parallel for simd' are ignored}} #pragma omp target teams distribute parallel for simd foo bar for (i = 0; i < 16; ++i) ; } void test_non_identifiers() { int i, x; // expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute parallel for simd' are ignored}} #pragma omp target teams distribute parallel for simd; for (i = 0; i < 16; ++i) ; // expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute parallel for simd' are ignored}} #pragma omp target teams distribute parallel for simd private(x); for (i = 0; i < 16; ++i) ; // expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute parallel for simd' are ignored}} #pragma omp target teams distribute parallel for simd, private(x); for (i = 0; i < 16; ++i) ; } extern int foo(); void test_collapse() { int i; // expected-error@+1 {{expected '('}} #pragma omp target teams distribute parallel for simd collapse for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp target teams distribute parallel for simd collapse( for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp target teams distribute parallel for simd collapse() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp target teams distribute parallel for simd collapse(, for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp target teams distribute parallel for simd collapse(, ) for (i = 0; i < 16; ++i) ; // expected-warning@+2 {{extra tokens at the end of '#pragma omp target teams distribute parallel for simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp target teams distribute parallel for simd collapse 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp target teams distribute parallel for simd 
collapse(4 for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute parallel for simd', but found only 1}} // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp target teams distribute parallel for simd collapse(4, for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute parallel for simd', but found only 1}} // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp target teams distribute parallel for simd collapse(4, ) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute parallel for simd', but found only 1}} // expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp target teams distribute parallel for simd collapse(4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute parallel for simd', but found only 1}} // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp target teams distribute parallel for simd collapse(4 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute parallel for simd', but found only 1}} // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp target teams distribute parallel for simd collapse(4, , 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute parallel for simd', but found only 1}} #pragma omp target teams distribute parallel for simd collapse(4) for (int i1 = 0; i1 < 16; ++i1) for (int i2 = 0; i2 < 16; ++i2) for (int i3 = 0; i3 < 16; ++i3) for (int i4 = 0; i4 < 16; ++i4) foo(); // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp target teams distribute parallel for simd collapse(4, 8) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute parallel for simd', but found only 1}} // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp target teams distribute parallel for simd collapse(2.5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp target teams distribute parallel for simd collapse(foo()) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp target teams distribute parallel for simd collapse(-5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp target teams distribute parallel for simd collapse(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp target teams distribute parallel for simd collapse(5 - 5) for (i = 0; i < 16; ++i) ; // expected-error@+4 {{OpenMP constructs may not be nested inside a simd region}} #pragma omp target teams distribute parallel for simd collapse(2) firstprivate(i) // expected-note {{defined as firstprivate}} for (i = 0; i < 16; ++i) // 
expected-error {{loop iteration variable in the associated loop of 'omp target teams distribute parallel for simd' directive may not be firstprivate, predetermined as lastprivate}} for (int j = 0; j < 16; ++j) #pragma omp parallel for reduction(+ : i, j) for (int k = 0; k < 16; ++k) i += j; } void test_private() { int i; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp target teams distribute parallel for simd private( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp target teams distribute parallel for simd private(, for (i = 0; i < 16; ++i) ; // expected-error@+1 2 {{expected expression}} #pragma omp target teams distribute parallel for simd private(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp target teams distribute parallel for simd private() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp target teams distribute parallel for simd private(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp target teams distribute parallel for simd private(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp target teams distribute parallel for simd private(x) for (i = 0; i < 16; ++i) ; #pragma omp target teams distribute parallel for simd private(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target teams distribute parallel for simd private(x, y, z) for (i = 0; i < 16; ++i) { x = y * i + z; } } void test_lastprivate() { int i; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp target teams distribute parallel for simd lastprivate( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp target teams distribute parallel for simd lastprivate(, for (i = 0; i < 16; ++i) ; // expected-error@+1 2 {{expected expression}} #pragma omp target teams distribute parallel for simd lastprivate(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp target teams distribute parallel for simd lastprivate() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp target teams distribute parallel for simd lastprivate(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp target teams distribute parallel for simd lastprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp target teams distribute parallel for simd lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp target teams distribute parallel for simd lastprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target teams distribute parallel for simd lastprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_firstprivate() { int i; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp target teams distribute parallel for simd firstprivate( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp target teams distribute parallel for simd firstprivate(, for (i = 0; i < 16; ++i) ; // expected-error@+1 2 {{expected expression}} #pragma omp target teams distribute parallel for simd 
firstprivate(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp target teams distribute parallel for simd firstprivate() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp target teams distribute parallel for simd firstprivate(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp target teams distribute parallel for simd firstprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; // expected-error@+1 {{lastprivate variable cannot be firstprivate}} expected-note@+1 {{defined as lastprivate}} #pragma omp target teams distribute parallel for simd lastprivate(x) firstprivate(x) for (i = 0; i < 16; ++i) ; // expected-error@+1 2 {{lastprivate variable cannot be firstprivate}} expected-note@+1 2 {{defined as lastprivate}} #pragma omp target teams distribute parallel for simd lastprivate(x, y) firstprivate(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 3 {{lastprivate variable cannot be firstprivate}} expected-note@+1 3 {{defined as lastprivate}} #pragma omp target teams distribute parallel for simd lastprivate(x, y, z) firstprivate(x, y, z) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp target teams distribute parallel for simd simdlen(64) safelen(8) for (i = 0; i < 16; ++i) ; } void test_loop_messages() { float a[100], b[100], c[100]; // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp target teams distribute parallel for simd for (float fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp target teams distribute parallel for simd for (double fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } }
ckdeg.c
/* Maximilien Danisch and Qinna Wang January 2015 http://bit.ly/maxdan94 maximilien.danisch@telecom-paristech.fr Info: Feel free to use these lines as you wish. This program computes the k-clique degrees of each node in the graph (that is the number of k-cliques the node belongs to). To compile: "gcc ckdeg.c -O9 -o ckdeg -fopenmp". To execute: "./ckdeg p k edgelist.txt kdeg.txt". p is the number of processors to use. k of k-clique to enumerate. "edgelist.txt" should contain the graph: one edge on each line separated by a space. Will print the total number of k-cliques. "kdeg.txt" will contain the k-clique degrees (node ID followed and its k-clique degree separated by a space on each line). Note: parallelisation over edges and increasing core ordering */ #include <stdlib.h> #include <stdio.h> #include <omp.h> #include <time.h> #define NLINKS 100000000 //maximum number of edges for memory allocation, will increase if needed // heap data structure : typedef struct { unsigned key; unsigned value; } keyvalue; typedef struct { unsigned n_max; // max number of nodes. unsigned n; // number of nodes. unsigned *pt; // pointers to nodes. keyvalue *kv; // nodes. } bheap; bheap *construct(unsigned n_max){ unsigned i; bheap *heap=malloc(sizeof(bheap)); heap->n_max=n_max; heap->n=0; heap->pt=malloc(n_max*sizeof(unsigned)); for (i=0;i<n_max;i++) heap->pt[i]=-1; heap->kv=malloc(n_max*sizeof(keyvalue)); return heap; } inline void swap(bheap *heap,unsigned i, unsigned j) { keyvalue kv_tmp=heap->kv[i]; unsigned pt_tmp=heap->pt[kv_tmp.key]; heap->pt[heap->kv[i].key]=heap->pt[heap->kv[j].key]; heap->kv[i]=heap->kv[j]; heap->pt[heap->kv[j].key]=pt_tmp; heap->kv[j]=kv_tmp; } inline void bubble_up(bheap *heap,unsigned i) { unsigned j=(i-1)/2; while (i>0) { if (heap->kv[j].value>heap->kv[i].value) { swap(heap,i,j); i=j; j=(i-1)/2; } else break; } } inline void bubble_down(bheap *heap) { unsigned i=0,j1=1,j2=2,j; while (j1<heap->n) { j=( (j2<heap->n) && (heap->kv[j2].value<heap->kv[j1].value) ) ? j2 : j1 ; if (heap->kv[j].value < heap->kv[i].value) { swap(heap,i,j); i=j; j1=2*i+1; j2=j1+1; continue; } break; } } inline void insert(bheap *heap,keyvalue kv){ heap->pt[kv.key]=(heap->n)++; heap->kv[heap->n-1]=kv; bubble_up(heap,heap->n-1); } inline void update(bheap *heap,unsigned key){ unsigned i=heap->pt[key]; if (i!=-1){ ((heap->kv[i]).value)--; bubble_up(heap,i); } } inline keyvalue popmin(bheap *heap){ keyvalue min=heap->kv[0]; heap->pt[min.key]=-1; heap->kv[0]=heap->kv[--(heap->n)]; heap->pt[heap->kv[0].key]=0; bubble_down(heap); return min; } // graph datastructure: typedef struct { unsigned s; unsigned t; } edge; typedef struct { //edge list structure: unsigned n; //number of nodes unsigned e; //number of edges unsigned n2; //number of nodes with core value larger than one unsigned e2; //number of edges between nodes with core value larger than one edge *edges;//list of edges //to compute a degeneracy ordering: unsigned *d0; //degrees unsigned *cd0; //cumulative degree: (start with 0) length=dim+1 unsigned *adj0; //list of neighbors unsigned *rank; //degeneracy rankings of nodes unsigned *map;//map[newlabel]=oldlabel unsigned core; //core number of the graph //truncated neighborhoods: unsigned *d; //truncated degrees unsigned *cd; //cumulative degree: (start with 0) length=dim+1 unsigned *adj; //list of neighbors with higher rank } sparse; //compute the maximum of three unsigned inline unsigned max3(unsigned a,unsigned b,unsigned c){ a=(a>b) ? a : b; return (a>c) ? 
a : c; } //reading the edgelist from file sparse* readedgelist(char* edgelist){ unsigned e1=NLINKS; sparse *g=malloc(sizeof(sparse)); FILE *file; g->n=0; g->e=0; file=fopen(edgelist,"r"); g->edges=malloc(e1*sizeof(edge)); while (fscanf(file,"%u %u", &(g->edges[g->e].s), &(g->edges[g->e].t))==2) { g->n=max3(g->n,g->edges[g->e].s,g->edges[g->e].t); if (g->e++==e1) { e1+=NLINKS; g->edges=realloc(g->edges,e1*sizeof(edge)); } } fclose(file); g->n++; g->edges=realloc(g->edges,g->e*sizeof(edge)); return g; } //Building the graph structure void mkgraph(sparse *g){ unsigned i; g->d0=calloc(g->n,sizeof(unsigned)); for (i=0;i<g->e;i++) { g->d0[g->edges[i].s]++; g->d0[g->edges[i].t]++; } g->cd0=malloc((g->n+1)*sizeof(unsigned)); g->cd0[0]=0; for (i=1;i<g->n+1;i++) { g->cd0[i]=g->cd0[i-1]+g->d0[i-1]; g->d0[i-1]=0; } g->adj0=malloc(2*g->e*sizeof(unsigned)); for (i=0;i<g->e;i++) { g->adj0[ g->cd0[g->edges[i].s] + g->d0[ g->edges[i].s ]++ ]=g->edges[i].t; g->adj0[ g->cd0[g->edges[i].t] + g->d0[ g->edges[i].t ]++ ]=g->edges[i].s; } } //Building the heap structure with (key,value)=(node,degree) for each node bheap* mkheap(sparse *g){ unsigned i; keyvalue kv; bheap* heap=construct(g->n); for (i=0;i<g->n;i++){ kv.key=i; kv.value=g->d0[i]; insert(heap,kv); } return heap; } void freeheap(bheap *heap){ free(heap->pt); free(heap->kv); free(heap); } //computing degeneracy ordering and core value void kcore(sparse* g,unsigned kmax){ unsigned i,j,r=0,n=g->n,k=kmax-1; keyvalue kv; unsigned c=0;//the core number bheap *heap=mkheap(g); g->rank=malloc(g->n*sizeof(unsigned)); g->map=malloc(g->n*sizeof(unsigned)); for (i=0;i<g->n;i++){ kv=popmin(heap); if (kv.value>c){ c=kv.value; } if (c<k){//remove node with core value less than kmax-1 g->rank[kv.key]=-1; n--; } else{ g->map[n-(++r)]=kv.key; g->rank[kv.key]=n-r; } for (j=g->cd0[kv.key];j<g->cd0[kv.key+1];j++){ update(heap,g->adj0[j]); } } freeheap(heap); free(g->d0); free(g->cd0); free(g->adj0); g->core=c; g->n2=n; } void relabelnodes(sparse *g) { unsigned i,j,source,target; j=0; for (i=0;i<g->e;i++) { source=g->rank[g->edges[i].s]; target=g->rank[g->edges[i].t]; if (source==-1 || target==-1){ continue; } if (source<target) { g->edges[j].s=target; g->edges[j++].t=source; } else { g->edges[j].s=source; g->edges[j++].t=target; } } g->e2=j; g->edges=realloc(g->edges,g->e2*sizeof(edge)); } //for future use in qsort int cmpfunc (const void * a, const void * b){ if (*(unsigned*)a>*(unsigned*)b){ return 1; } return -1; } //Building the special graph structure void mkspecial(sparse *g){ unsigned i; g->d=calloc(g->n2,sizeof(unsigned)); for (i=0;i<g->e2;i++) { g->d[g->edges[i].s]++; } g->cd=malloc((g->n2+1)*sizeof(unsigned)); g->cd[0]=0; for (i=1;i<g->n2+1;i++) { g->cd[i]=g->cd[i-1]+g->d[i-1]; g->d[i-1]=0; } g->adj=malloc((g->e2)*sizeof(unsigned)); for (i=0;i<g->e2;i++) { g->adj[g->cd[g->edges[i].s] + g->d[ g->edges[i].s ]++ ]=g->edges[i].t; } #pragma omp parallel for private(i) for (i=0;i<g->n2;i++) { qsort(&g->adj[g->cd[i]],g->d[i],sizeof(unsigned),cmpfunc); } //free(g->edges); Can be freed if node parallelisation is used instead of edge } void freesparse(sparse *g){ free(g->edges); free(g->rank); free(g->map); free(g->d); free(g->cd); free(g->adj); free(g); } //store the intersection of list1 and list2 in list3 and return the size of list3 (the 3 lists are sorted) inline unsigned merging(unsigned *list1, unsigned s1, unsigned *list2, unsigned s2,unsigned *list3){ unsigned i=0,j=0,s3=0; unsigned x=list1[0],y=list2[0]; while (i<s1 && j<s2){ if(x<y){ x=list1[++i]; continue; } 
if(y<x){ y=list2[++j]; continue; } list3[s3++]=x; x=list1[++i]; y=list2[++j]; } return s3; } //the recursion to compute all possible intersections void recursion(unsigned kmax, unsigned k, sparse* g, unsigned* ck, unsigned* merge, unsigned* size, unsigned long long* ckdeg){ unsigned t=(k-3)*g->core,t2=t+g->core; unsigned i, j, u; if (size[k-3]<kmax-k){//stop if we already know k-cliques cannot be formed return; } if (k==kmax){//increasing the k-clique degrees for (i=0;i<kmax-1;i++){ ckdeg[ck[i]]+=size[k-3]; } for (i=0;i<size[k-3];i++){ ckdeg[merge[t+i]]++; } return; } for(i=0; i<size[k-3]; i++){ ck[k-1]=merge[t+i]; size[k-2]=merging(&g->adj[g->cd[ck[k-1]]],g->d[ck[k-1]],&merge[t],size[k-3],&merge[t2]); recursion(kmax, k+1, g, ck, merge, size, ckdeg); } } //one pass over all k-cliques unsigned long long *onepass(sparse *g,unsigned kmax){ unsigned e,i,u,v,k; unsigned *merge,*size,*ck; unsigned long long *ckdeg_p,*ckdeg; if (kmax>2){ ckdeg=calloc(g->n2,sizeof(unsigned long long)); #pragma omp parallel private(merge,size,ck,ckdeg_p,e,u,v,k) shared(ckdeg) { merge=malloc((kmax-2)*g->core*sizeof(unsigned)); size=malloc((kmax-2)*sizeof(unsigned)); ck=malloc(kmax*sizeof(unsigned)); ckdeg_p=calloc(g->n2,sizeof(unsigned long long)); #pragma omp for schedule(dynamic, 1) nowait for(e=0; e<g->e2; e++){ ck[0]=g->edges[e].s; ck[1]=g->edges[e].t; size[0]=merging(&(g->adj[g->cd[ck[0]]]),g->d[ck[0]],&(g->adj[g->cd[ck[1]]]),g->d[ck[1]],merge); recursion(kmax,3,g,ck,merge,size,ckdeg_p); } #pragma omp critical { for (i=0;i<g->n2;i++){ ckdeg[i]+=ckdeg_p[i]; } } free(ckdeg_p); free(merge); free(size); } } return ckdeg; } unsigned long long printckdeg(unsigned long long *ckdeg,sparse *g, char* output){ unsigned i; unsigned long long sum=0; FILE* file=fopen(output,"w"); for (i=0;i<g->n2;i++){ fprintf(file,"%u %llu\n",g->map[i],ckdeg[i]); sum+=ckdeg[i]; } fclose(file); return sum; } int main(int argc,char** argv){ sparse* g; unsigned i, kmax=atoi(argv[2]); unsigned long long *ckdeg,nck; omp_set_num_threads(atoi(argv[1])); time_t t0,t1,t2; t1=time(NULL); t0=t1; printf("Reading edgelist from file %s\n",argv[3]); g=readedgelist(argv[3]); t2=time(NULL); printf("- Time = %ldh%ldm%lds\n",(t2-t1)/3600,((t2-t1)%3600)/60,((t2-t1)%60)); t1=t2; printf("Number of nodes: %u\n",g->n); printf("Number of edges: %u\n",g->e); printf("Building the graph structure\n"); mkgraph(g); printf("Computing degeneracy ordering\n"); kcore(g,kmax); relabelnodes(g); printf("Number of nodes (with core value > %u): %u\n",kmax-2,g->n2); printf("Number of edges (between nodes with core value > %u): %u\n",kmax-2,g->e2); printf("Core number = %u\n",g->core); mkspecial(g); t2=time(NULL); printf("- Time = %ldh%ldm%lds\n",(t2-t1)/3600,((t2-t1)%3600)/60,((t2-t1)%60)); t1=t2; printf("computing %u-clique degrees\n",kmax); ckdeg=onepass(g,kmax); nck=printckdeg(ckdeg,g,argv[4]); nck/=kmax; printf("Number of %u-cliques: %llu\n",kmax,nck); free(ckdeg); freesparse(g); t2=time(NULL); printf("- Time = %ldh%ldm%lds\n",(t2-t1)/3600,((t2-t1)%3600)/60,((t2-t1)%60)); t1=t2; printf("- Overall time = %ldh%ldm%lds\n",(t2-t0)/3600,((t2-t0)%3600)/60,((t2-t0)%60)); return 0; }
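/* Added worked example (commentary only, not part of the original program):
   merging() advances two sorted adjacency lists in lockstep, e.g. for
   list1 = {1,3,5,8} (s1 = 4) and list2 = {2,3,8,9} (s2 = 4) it writes {3,8}
   into list3 and returns s3 = 2. In onepass()/recursion(), that intersection
   is exactly the set of common neighbors (within the truncated adjacency
   lists) that can extend the current partial clique ck[0..k-1] by one vertex,
   which is why a shrinking intersection (size[k-3] < kmax-k) lets the
   recursion cut off early. */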
GB_binop__div_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__div_uint32) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__div_uint32) // A.*B function (eWiseMult): GB (_AemultB_03__div_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__div_uint32) // A*D function (colscale): GB (_AxD__div_uint32) // D*A function (rowscale): GB (_DxB__div_uint32) // C+=B function (dense accum): GB (_Cdense_accumB__div_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__div_uint32) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_uint32) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_uint32) // C=scalar+B GB (_bind1st__div_uint32) // C=scalar+B' GB (_bind1st_tran__div_uint32) // C=A+scalar GB (_bind2nd__div_uint32) // C=A'+scalar GB (_bind2nd_tran__div_uint32) // C type: uint32_t // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = GB_IDIV_UNSIGNED (aij, bij, 32) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_IDIV_UNSIGNED (x, y, 32) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_DIV || GxB_NO_UINT32 || GxB_NO_DIV_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__div_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__div_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__div_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__div_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__div_uint32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__div_uint32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__div_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; 
return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__div_uint32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__div_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__div_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__div_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__div_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = Bx [p] ; Cx [p] = GB_IDIV_UNSIGNED (x, bij, 32) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__div_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = Ax [p] ; Cx [p] = GB_IDIV_UNSIGNED (aij, y, 32) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = GB_IDIV_UNSIGNED (x, aij, 32) ; \ } GrB_Info GB (_bind1st_tran__div_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict 
A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = GB_IDIV_UNSIGNED (aij, y, 32) ; \ } GrB_Info GB (_bind2nd_tran__div_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
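// Added note (hedged, based on SuiteSparse:GraphBLAS conventions rather than
// code shown here): the BinaryOp GB_IDIV_UNSIGNED (aij, bij, 32) is the
// zero-safe unsigned division used throughout GraphBLAS; division by zero is
// defined to return UINT32_MAX instead of trapping, which is what allows the
// generated GB_PRAGMA_SIMD_VECTORIZE loops above to run without per-element
// guards.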
mysql_netauth_fmt_plug.c
/* Cracker for MySQL network authentication hashes. Hacked together * during May of 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>. * * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>, * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without modification, * are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_mysqlna; #elif FMT_REGISTERS_H john_register_one(&fmt_mysqlna); #else #include "sha.h" #include <string.h> #include <assert.h> #include <errno.h> #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #ifdef _OPENMP static int omp_t = 1; #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 1024// tuned K8-dual HT #endif #endif #include "memdbg.h" #define FORMAT_LABEL "mysqlna" #define FORMAT_NAME "MySQL Network Authentication" #define FORMAT_TAG "$mysqlna$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #define ALGORITHM_NAME "SHA1 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 32 #define HEX_LENGTH 40 #define CIPHERTEXT_LENGTH 90 #define BINARY_SIZE 20 #define BINARY_ALIGN MEM_ALIGN_WORD #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN MEM_ALIGN_NONE #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 static struct fmt_tests mysqlna_tests[] = { {"$mysqlna$2D52396369653E4626293B2F75244D3871507A39*7D63098BEE381A51AA6DF11E307E46BD4F8B6E0C", "openwall"}, {"$mysqlna$615c2b5e79656f7d4931594e5b5d416c7b483365*c3a70da2874db890eb2f0a5e3ea80b2ed17da0d0", "openwall"}, {"$mysqlna$295a687c59275452214b366b39776d3f31757b2e*7343f45c94cccd646a1b29bbfad064a9ee5c0380", "overlord magnum"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)]; static struct custom_salt { char unsigned scramble[20]; } *cur_salt; static void init(struct fmt_main *self) { #ifdef _OPENMP omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out)); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *p, *q; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)) return 0; p = ciphertext + FORMAT_TAG_LEN; q = strstr(ciphertext, "*"); if(!q) return 0; if (q - p != HEX_LENGTH) return 0; while (atoi16[ARCH_INDEX(*p)] != 0x7F && p < q) p++; if (q - p != 0) return 0; if(strlen(p) < HEX_LENGTH) return 0; q = p + 1; while (atoi16[ARCH_INDEX(*q)] != 0x7F) q++; return !*q && q - p - 1 == HEX_LENGTH; } static char* split(char *ciphertext, int index, struct fmt_main *self) { static char out[CIPHERTEXT_LENGTH + 1]; strncpy(out, ciphertext, sizeof(out)); strlwr(out); return out; } static void *get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; char *p; int i; static struct custom_salt cs; ctcopy += FORMAT_TAG_LEN; /* skip over "$mysqlna$" */ p = strtokm(ctcopy, "*"); for (i = 0; i < 20; i++) cs.scramble[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; MEM_FREE(keeptr); return (void *)&cs; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char 
*p; int i; p = strrchr(ciphertext, '*') + 1; for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index++) #endif { unsigned char stage1_hash[20]; unsigned char inner_hash[20]; unsigned char token[20]; SHA_CTX ctx; int i; unsigned char *p = (unsigned char*)crypt_out[index]; SHA1_Init(&ctx); SHA1_Update(&ctx, saved_key[index], strlen(saved_key[index])); SHA1_Final(stage1_hash, &ctx); SHA1_Init(&ctx); SHA1_Update(&ctx, stage1_hash, 20); SHA1_Final(inner_hash, &ctx); SHA1_Init(&ctx); SHA1_Update(&ctx, cur_salt->scramble, 20); SHA1_Update(&ctx, inner_hash, 20); SHA1_Final(token, &ctx); for(i = 0; i < 20; i++) { p[i] = token[i] ^ stage1_hash[i]; } } return count; } static int cmp_all(void *binary, int count) { int index = 0; #ifdef _OPENMP for (; index < count; index++) #endif if (!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void mysqlna_set_key(char *key, int index) { int saved_len = strlen(key); if (saved_len > PLAINTEXT_LENGTH) saved_len = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] = 0; } static char *get_key(int index) { return saved_key[index]; } struct fmt_main fmt_mysqlna = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_SPLIT_UNIFIES_CASE, { NULL }, { FORMAT_TAG }, mysqlna_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, split, get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, mysqlna_set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
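/* Added summary note (derived from crypt_all() above): MySQL 4.1+ network
   authentication replies with
       reply = SHA1(scramble || SHA1(SHA1(password))) XOR SHA1(password),
   so crypt_all() computes stage1 = SHA1(password), token =
   SHA1(scramble || SHA1(stage1)), and stores token XOR stage1; cmp_one()
   then compares that value against the sniffed reply parsed from the second
   hex field of the $mysqlna$ input line. */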
transpose.c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#endif

typedef struct Matrix {
  uint16_t rows;
  uint16_t cols;
  uint8_t** data;  // pointer to pointer of list of values
} Matrix;

/*
@brief Initialize a matrix (of size `[n_rows, n_cols]`) with zeros.
@param n_rows Number of rows.
@param n_cols Number of columns.
@returns Matrix of zeros.
*/
Matrix* zeros(uint16_t n_rows, uint16_t n_cols) {
  Matrix* matrix = (Matrix*)malloc(sizeof(Matrix));
  if (matrix == NULL) printf("Failed to malloc...\n");
  matrix->rows = n_rows;
  matrix->cols = n_cols;

  // Allocate memory for the data, and fill with zeros.
  // matrix->data = (double*) calloc(n_cols*n_rows, sizeof(double));
  uint8_t** data = (uint8_t**)malloc(sizeof(uint8_t*) * n_rows);
  if (data == NULL) printf("Failed to malloc...\n");
  for (uint16_t x = 0; x < n_rows; x++) {
    data[x] = (uint8_t*)calloc(n_cols, sizeof(uint8_t));
    if (data[x] == NULL) printf("Failed to malloc...\n");
  }
  matrix->data = data;
  return matrix;
}

/*
@brief Create a matrix (2D tensor) with data provided.
@param data Data to be inserted in row-major order.
@param n_rows Number of rows.
@param n_cols Number of columns.
@returns Matrix of size [n,m] with values inserted.
*/
Matrix* fill(uint8_t* data, uint16_t n_rows, uint16_t n_cols) {
  Matrix* matrix = zeros(n_rows, n_cols);
  for (uint16_t x = 0; x < n_rows; x++) {
    for (uint16_t y = 0; y < n_cols; y++) {
      matrix->data[x][y] = *(data + (n_cols * x + y));
    }
  }
  return matrix;
}

// @brief Free an allocated matrix.
void free_matrix(Matrix* m) {
  for (int i = 0; i < m->rows; i++) {
    free(m->data[i]);
  }
  free(m->data);
  free(m);
}

// @brief Print matrix.
void print_matrix(Matrix* m) {
  for (uint16_t x = 0; x < m->rows; x++) {
    for (uint16_t y = 0; y < m->cols; y++) {
      printf("%d\t", m->data[x][y]);
    }
    printf("\n");
  }
  printf("\n");
}

/*
In-place NxN matrix transpose. Unfortunately, due to time constraints and the
non-trivial nature of in-place O(1) NxM matrix transpose, we leave this for
future work.

We assume that the NxM matrix is stored in row-major order with zero-based
indexing. This means that the (n,m) element, for `n = [0,n-1]` and
`m = [0, m-1]`, is stored at the memory address a = Mn+m (plus some offset,
which we ignore). In the transposed MxN matrix, the corresponding (m,n)
element is stored at the address a' = Nm+n.

@param arr Array to be transposed.
*/
void transpose(Matrix* arr) {
  uint16_t i, j;
#ifdef _OPENMP
  // Parallel section: swap the strictly lower triangle with its mirror.
  #pragma omp parallel for private(i, j) num_threads(8) schedule(dynamic)
  for (i = 1; i < arr->rows; i++) {
    for (j = 0; j < i; j++) {
      uint8_t tmp = arr->data[i][j];
      arr->data[i][j] = arr->data[j][i];
      arr->data[j][i] = tmp;
    }
  }
#else
  // Serial section:
  for (i = 1; i < arr->rows; i++) {
    for (j = 0; j < i; j++) {
      uint8_t tmp = arr->data[i][j];
      arr->data[i][j] = arr->data[j][i];
      arr->data[j][i] = tmp;
    }
  }
#endif
}

int main(int argc, char* argv[]) {
  if (argc < 2) {
    fprintf(stderr, "Usage: %s N\n", argv[0]);
    return 1;
  }
  uint16_t N = atoi(argv[1]);
  uint8_t* a = (uint8_t*)calloc(N * N, sizeof(uint8_t));
  if (a == NULL) printf("Failed to malloc...\n");
  for (uint32_t i = 0; i < N * N; i++) {
    a[i] = (rand() % (255 - 0 + 1)) + 0;  // generate random number between [0,255]
  }
  Matrix* orig = fill(a, N, N);
  // print_matrix(orig);
  transpose(orig);
  // print_matrix(orig);
  free_matrix(orig);
  free(a);
  return 0;
}
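/* Added worked example for the addressing comment above (illustrative only):
   in a row-major 2x4 matrix (N = 2, M = 4), element (n,m) = (1,2) sits at
   a = M*n + m = 6, and in the 4x2 transpose it must move to a' = N*m + n = 5;
   a general in-place NxM transpose has to follow such permutation cycles.
   The square case implemented above sidesteps that by mirror-swapping the
   strictly lower triangle, i.e. N*(N-1)/2 independent swaps, which is also
   why the OpenMP loop needs no synchronization. */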
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 32; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); 
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  return 0;
}
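/* Added note (illustrative): each stencil update above reads 7 coefficients
   and 7 neighbors, i.e. 7 multiplies and 6 adds (13 flops) per point, so one
   time step costs roughly 13*(Nx-2)*(Ny-2)*(Nz-2) flops. The t%2 / (t+1)%2
   indexing double-buffers the grid: every sweep reads one copy of A and
   writes the other, so no intermediate array copy is needed between time
   steps. */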
test.c
#include <stdio.h>
#include <omp.h>

#pragma omp requires unified_shared_memory

#include "../utilities/check.h"
#include "../utilities/utilities.h"

#define TRIALS (1)

#define N (992)

#define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;})

#define ZERO(X) ZERO_ARRAY(N, X)

#define PARALLEL() { \
  _Pragma("omp parallel num_threads(128)") \
  { \
    int i = omp_get_thread_num()*4; \
    for (int j = i; j < i + 4; j++) { \
      A[j] += C[j] + D[j]; \
    } \
  } \
}

// There is no portable way to invoke a macro N times, so compose it in
// powers of five instead.
#define PARALLEL5()   { PARALLEL()   PARALLEL()   PARALLEL()   PARALLEL()   PARALLEL()   }
#define PARALLEL25()  { PARALLEL5()  PARALLEL5()  PARALLEL5()  PARALLEL5()  PARALLEL5()  }
#define PARALLEL125() { PARALLEL25() PARALLEL25() PARALLEL25() PARALLEL25() PARALLEL25() }

int main(void) {
  check_offloading();

  double A[N], B[N], C[N], D[N], E[N];

  INIT();

  //
  // Test: Multiple parallel regions in a single target.
  //
  TEST({
    for (int i = 0; i < 512; i++) {
      A[i] = 0;
    }
    PARALLEL125()
  }, VERIFY(0, 512, A[i], 125*(1+i)));

  return 0;
}
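// Added note (illustrative): each PARALLEL() region runs 128 threads that
// each update 4 consecutive elements, covering indices 0..511 of A. A single
// region adds C[j] + D[j] = 1 + j to A[j], so after PARALLEL125() expands to
// 125 regions the expected result is A[j] = 125 * (1 + j), which is exactly
// the predicate VERIFY(0, 512, A[i], 125*(1+i)) checks.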
main.c
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>

#include "chacha.h"

#define BUFFER_SIZE (1 << 16)

static size_t filesize(const char* filename) {
    size_t size = 0;
    struct stat sb;
    if (stat(filename, &sb) != -1) {
        size = (size_t) sb.st_size;
    }
    return size;
}

static bool copy_file(const char* inpath, const char* outpath) {
    FILE* infile = fopen(inpath, "rb");
    FILE* outfile = fopen(outpath, "wb");
    bool success = true;

    // Handle file or allocation failure
    if (infile == NULL || outfile == NULL) {
        goto error;
    }

    // Disable I/O buffering
    setbuf(infile, NULL);
    setbuf(outfile, NULL);

    // Copy file blocks
    for (;;) {
        u8 buffer[BUFFER_SIZE];

        // Read into buffer then write buffer
        const size_t read_size = fread(buffer, 1, BUFFER_SIZE, infile);
        const size_t write_size = fwrite(buffer, 1, read_size, outfile);

        // Check for EOF and errors
        if (read_size != BUFFER_SIZE || read_size != write_size) {
            if (ferror(infile) || ferror(outfile)) {
                goto error;
            }
            break;
        }
    }

    goto exit;

error:
    fprintf(stderr, "[copy_file] copy '%s' to '%s' failed: %s\n", inpath, outpath, strerror(errno));
    success = false;

exit:
    // Close input file
    if (infile != NULL) {
        fclose(infile);
    }

    // Close output file
    if (outfile != NULL) {
        fclose(outfile);
    }

    return success;
}

bool crypt_file_parallel(const char* path, const u8 key[CHACHA_KEY_SIZE]) {
    size_t size = filesize(path);
    FILE* file = fopen(path, "rb+");
    const size_t full_blocks = size / BUFFER_SIZE;
    const size_t partial_size = size % BUFFER_SIZE;
    bool success = true;

    // Handle file or allocation failure
    if (file == NULL || size == 0) {
        goto error;
    }

    // Disable I/O buffering
    setbuf(file, NULL);

    // Process full blocks of file data (using parallel threads if possible)
    #pragma omp parallel for default(shared) schedule(nonmonotonic:dynamic)
    for (size_t i = 0; i < full_blocks; i++) {
        const u64 offset = i * BUFFER_SIZE;
        u8 buffer[BUFFER_SIZE];

        // Seek to correct file position and read into buffer
        #pragma omp critical(file)
        {
            if (success) {
                fseek(file, (long int) offset, SEEK_SET);
                size_t read_size = fread(buffer, 1, BUFFER_SIZE, file);
                if (read_size != BUFFER_SIZE) {
                    success = false;
                }
            }
        }

        // Uses a nonce with all zero bytes. NOTE: reusing a nonce value with the same
        // key when encrypting two different plaintexts will void the security of the cipher.
        // This example always uses a nonce of all zero bytes and is only for testing purposes.
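        // Added note (assumption about the chacha.h helper, which is not
        // shown here): chacha_crypt_offset() is taken to derive the ChaCha
        // block-counter position from the absolute byte offset, so each
        // OpenMP iteration generates its own slice of keystream and no
        // cipher state is shared between threads; only the fseek/fread and
        // fseek/fwrite pairs need the critical sections.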
        // Crypt local file data
        chacha_crypt_offset(key, 0, offset, buffer, buffer, BUFFER_SIZE);

        // Seek to correct file position and write crypted data
        #pragma omp critical(file)
        {
            if (success) {
                fseek(file, (long int) offset, SEEK_SET);
                size_t write_size = fwrite(buffer, 1, BUFFER_SIZE, file);
                if (write_size != BUFFER_SIZE) {
                    success = false;
                }
            }
        }
    }

    // Handle last partial block of file data
    if (partial_size > 0) {
        const size_t offset = full_blocks * BUFFER_SIZE;
        u8 buffer[BUFFER_SIZE];

        fseek(file, (long int) offset, SEEK_SET);
        size_t read_size = fread(buffer, 1, partial_size, file);
        if (read_size != partial_size) {
            goto error;
        }

        chacha_crypt_offset(key, 0, offset, buffer, buffer, partial_size);

        fseek(file, (long int) offset, SEEK_SET);
        size_t write_size = fwrite(buffer, 1, partial_size, file);
        if (write_size != read_size) {
            goto error;
        }
    }

    goto exit;

error:
    fprintf(stderr, "[crypt_file_parallel] crypting '%s' failed: %s\n", path, strerror(errno));
    success = false;

exit:
    // Close input file
    if (file != NULL) {
        fclose(file);
    }

    return success;
}

int main(int argc, char** argv) {
    u8 key_buffer[CHACHA_KEY_SIZE] = { 0 };
    bool success = true;

    if (argc < 2) {
        printf("Usage: %s input_file [password] [output_file]\n", argv[0]);
        goto exit;
    }

    const char* inpath = argv[1];
    char* password = argc > 2 ? argv[2] : NULL;
    const char* outpath = argv[argc < 4 ? 1 : 3];

    // Combine password with empty key buffer
    if (password != NULL) {
        size_t password_len = strlen(password);
        memxor(key_buffer, key_buffer, password, MIN(sizeof(key_buffer), password_len));
        memwipe(password, password_len);
    }

    // Copy input file to output path
    if (inpath != outpath && strcmp(inpath, outpath) != 0) {
        printf("Copying '%s' to '%s'...", inpath, outpath);
        fflush(stdout);
        success = copy_file(inpath, outpath);
        printf(success ? " done.\n" : " failed.\n");
    }

    // Crypt file in parallel using threads
    if (success) {
        printf("Crypting '%s' using ChaCha cipher...", outpath);
        fflush(stdout);
        success = crypt_file_parallel(outpath, key_buffer);
        printf(success ? " done.\n" : " failed.\n");
    }

    // Securely wipe key buffer
    memwipe(key_buffer, sizeof(key_buffer));

exit:
    return success ? EXIT_SUCCESS : EXIT_FAILURE;
}
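/* Added usage note (illustrative; "chacha_tool" is a hypothetical binary
   name for this main()): because ChaCha en/decryption is the same XOR
   operation, running the tool twice with the same password restores the
   original file:
       ./chacha_tool secret.bin hunter2 secret.enc   (encrypt)
       ./chacha_tool secret.enc hunter2 secret.bin   (decrypt) */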
config.h
/* config.h. Generated from config.in by configure. */ /* config.in. Generated from configure.ac by autoheader. */ /* Define if building universal (internal helper macro) */ /* #undef AC_APPLE_UNIVERSAL_BUILD */ /* Define to 1 if translation of program messages to the user's native language is requested. */ /* #undef ENABLE_NLS */ /* Define to enable linker plugins */ #define ENABLE_PLUGINS 1 /* Define to do multi-threaded linking */ /* #undef ENABLE_THREADS */ /* Default big endian (true or false) */ #define GOLD_DEFAULT_BIG_ENDIAN false /* Default machine code */ /* #undef GOLD_DEFAULT_MACHINE */ /* Default OSABI code */ #define GOLD_DEFAULT_OSABI ELFOSABI_NONE /* Default size (32 or 64) */ /* #undef GOLD_DEFAULT_SIZE */ /* Define to 1 if you have the <byteswap.h> header file. */ /* #undef HAVE_BYTESWAP_H */ /* Define to 1 if you have the `chsize' function. */ /* #undef HAVE_CHSIZE */ /* Define to 1 if you have the declaration of `asprintf', and to 0 if you don't. */ #define HAVE_DECL_ASPRINTF 1 /* Define to 1 if you have the declaration of `basename', and to 0 if you don't. */ #define HAVE_DECL_BASENAME 0 /* Define to 1 if you have the declaration of `ffs', and to 0 if you don't. */ #define HAVE_DECL_FFS 1 /* Define to 1 if you have the declaration of `memmem', and to 0 if you don't. */ #define HAVE_DECL_MEMMEM 1 /* Define to 1 if you have the declaration of `snprintf', and to 0 if you don't. */ #define HAVE_DECL_SNPRINTF 1 /* Define to 1 if you have the declaration of `strndup', and to 0 if you don't. */ #define HAVE_DECL_STRNDUP 1 /* Define to 1 if you have the declaration of `strverscmp', and to 0 if you don't. */ #define HAVE_DECL_STRVERSCMP 0 /* Define to 1 if you have the declaration of `vasprintf', and to 0 if you don't. */ #define HAVE_DECL_VASPRINTF 1 /* Define to 1 if you have the declaration of `vsnprintf', and to 0 if you don't. */ #define HAVE_DECL_VSNPRINTF 1 /* Define to 1 if you have the <ext/hash_map> header file. */ #define HAVE_EXT_HASH_MAP 1 /* Define to 1 if you have the <ext/hash_set> header file. */ #define HAVE_EXT_HASH_SET 1 /* Define to 1 if you have the `ffsll' function. */ #define HAVE_FFSLL 1 /* Define to 1 if you have the `ftruncate' function. */ #define HAVE_FTRUNCATE 1 /* Define to 1 if you have the <inttypes.h> header file. */ #define HAVE_INTTYPES_H 1 /* Define to 1 if you have the `mallinfo' function. */ /* #undef HAVE_MALLINFO */ /* Define to 1 if you have the <memory.h> header file. */ #define HAVE_MEMORY_H 1 /* Define to 1 if you have the `mmap' function. */ #define HAVE_MMAP 1 /* Define to 1 if you have the mremap function with MREMAP_MAYMOVE support */ /* #undef HAVE_MREMAP */ /* Define if compiler supports #pragma omp threadprivate */ /* #undef HAVE_OMP_SUPPORT */ /* Define to 1 if you have the `posix_fallocate' function. */ /* #undef HAVE_POSIX_FALLOCATE */ /* Define to 1 if you have the `pread' function. */ #define HAVE_PREAD 1 /* Define to 1 if you have the `readv' function. */ #define HAVE_READV 1 /* Define if struct stat has a field st_mtim with timespec for mtime */ #define HAVE_STAT_ST_MTIM 1 /* Define to 1 if you have the <stdint.h> header file. */ #define HAVE_STDINT_H 1 /* Define to 1 if you have the <stdlib.h> header file. */ #define HAVE_STDLIB_H 1 /* Define to 1 if you have the <strings.h> header file. */ #define HAVE_STRINGS_H 1 /* Define to 1 if you have the <string.h> header file. */ #define HAVE_STRING_H 1 /* Define to 1 if you have the `sysconf' function. 
*/ #define HAVE_SYSCONF 1 /* Define to 1 if you have the <sys/mman.h> header file. */ #define HAVE_SYS_MMAN_H 1 /* Define to 1 if you have the <sys/stat.h> header file. */ #define HAVE_SYS_STAT_H 1 /* Define to 1 if you have the <sys/types.h> header file. */ #define HAVE_SYS_TYPES_H 1 /* Define to support 32-bit big-endian targets */ #define HAVE_TARGET_32_BIG 1 /* Define to support 32-bit little-endian targets */ #define HAVE_TARGET_32_LITTLE 1 /* Define to support 64-bit big-endian targets */ #define HAVE_TARGET_64_BIG 1 /* Define to support 64-bit little-endian targets */ #define HAVE_TARGET_64_LITTLE 1 /* Define if attributes work on C++ templates */ #define HAVE_TEMPLATE_ATTRIBUTES 1 /* Define to 1 if you have the `times' function. */ #define HAVE_TIMES 1 /* Define to 1 if you have the <tr1/unordered_map> header file. */ #define HAVE_TR1_UNORDERED_MAP 1 /* Define if ::std::tr1::unordered_map::rehash is usable */ #define HAVE_TR1_UNORDERED_MAP_REHASH 1 /* Define to 1 if you have the <tr1/unordered_set> header file. */ #define HAVE_TR1_UNORDERED_SET 1 /* Define to 1 if you have the <unistd.h> header file. */ #define HAVE_UNISTD_H 1 /* Define to 1 if you have the <zlib.h> header file. */ #define HAVE_ZLIB_H 1 /* Name of package */ #define PACKAGE "gold" /* Define to the address where bug reports for this package should be sent. */ #define PACKAGE_BUGREPORT "" /* Define to the full name of this package. */ #define PACKAGE_NAME "gold" /* Define to the full name and version of this package. */ #define PACKAGE_STRING "gold 0.1" /* Define to the one symbol short name of this package. */ #define PACKAGE_TARNAME "gold" /* Define to the home page for this package. */ #define PACKAGE_URL "" /* Define to the version of this package. */ #define PACKAGE_VERSION "0.1" /* Define to 1 if you have the ANSI C header files. */ #define STDC_HEADERS 1 /* System root for target files */ #define TARGET_SYSTEM_ROOT "" /* Whether the system root can be relocated */ #define TARGET_SYSTEM_ROOT_RELOCATABLE 0 /* Enable extensions on AIX 3, Interix. */ #ifndef _ALL_SOURCE # define _ALL_SOURCE 1 #endif /* Enable GNU extensions on systems that have them. */ #ifndef _GNU_SOURCE # define _GNU_SOURCE 1 #endif /* Enable threading extensions on Solaris. */ #ifndef _POSIX_PTHREAD_SEMANTICS # define _POSIX_PTHREAD_SEMANTICS 1 #endif /* Enable extensions on HP NonStop. */ #ifndef _TANDEM_SOURCE # define _TANDEM_SOURCE 1 #endif /* Enable general extensions on Solaris. */ #ifndef __EXTENSIONS__ # define __EXTENSIONS__ 1 #endif /* Version number of package */ #define VERSION "0.1" /* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most significant byte first (like Motorola and SPARC, unlike Intel). */ #if defined AC_APPLE_UNIVERSAL_BUILD # if defined __BIG_ENDIAN__ # define WORDS_BIGENDIAN 1 # endif #else # ifndef WORDS_BIGENDIAN /* # undef WORDS_BIGENDIAN */ # endif #endif /* Define to 1 if on MINIX. */ /* #undef _MINIX */ /* Define to 2 if the system does not provide POSIX.1 features except with this defined. */ /* #undef _POSIX_1_SOURCE */ /* Define to 1 if you need to in order for `stat' and other things to work. */ /* #undef _POSIX_SOURCE */
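/*
 * Illustrative sketch (not part of the generated header above): autoconf
 * feature macros like the HAVE_* definitions are typically consumed with
 * conditional compilation, choosing a fast path when a function exists and
 * a portable fallback otherwise. The map_or_read() helper is an assumption
 * for illustration only.
 */
#include "config.h"
#include <stddef.h>

#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

static void *map_or_read(int fd, size_t size)
{
#ifdef HAVE_MMAP
  // Fast path: map the file directly when mmap() is available.
  return mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
#else
  // Portable fallback: the caller would read() into a heap buffer instead.
  (void) fd; (void) size;
  return NULL;
#endif
}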
sstruct_sharedDOFComm.c
/*BHEADER********************************************************************** * Copyright (c) 2008, Lawrence Livermore National Security, LLC. * Produced at the Lawrence Livermore National Laboratory. * This file is part of HYPRE. See file COPYRIGHT for details. * * HYPRE is free software; you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License (as published by the Free * Software Foundation) version 2.1 dated February 1999. * * $Revision$ ***********************************************************************EHEADER*/ /****************************************************************************** * OpenMP Problems * * Need to fix the way these variables are set and incremented in loops: * tot_nsendRowsNcols, send_ColsData_alloc, tot_sendColsData * ******************************************************************************/ #include "_hypre_sstruct_ls.h" /*-------------------------------------------------------------------------- * hypre_MaxwellOffProcRowCreate *--------------------------------------------------------------------------*/ hypre_MaxwellOffProcRow * hypre_MaxwellOffProcRowCreate(HYPRE_Int ncols) { hypre_MaxwellOffProcRow *OffProcRow; HYPRE_Int *cols; HYPRE_Real *data; OffProcRow= hypre_CTAlloc(hypre_MaxwellOffProcRow, 1); (OffProcRow -> ncols)= ncols; cols= hypre_TAlloc(HYPRE_Int, ncols); data= hypre_TAlloc(HYPRE_Real, ncols); (OffProcRow -> cols)= cols; (OffProcRow -> data)= data; return OffProcRow; } /*-------------------------------------------------------------------------- * hypre_MaxwellOffProcRowDestroy *--------------------------------------------------------------------------*/ HYPRE_Int hypre_MaxwellOffProcRowDestroy(void *OffProcRow_vdata) { hypre_MaxwellOffProcRow *OffProcRow= (hypre_MaxwellOffProcRow *)OffProcRow_vdata; HYPRE_Int ierr= 0; if (OffProcRow) { hypre_TFree(OffProcRow -> cols); hypre_TFree(OffProcRow -> data); } hypre_TFree(OffProcRow); return ierr; } /*-------------------------------------------------------------------------- * hypre_SStructSharedDOF_ParcsrMatRowsComm * Given a sstruct_grid & parcsr matrix with rows corresponding to the * sstruct_grid, determine and extract the rows that must be communicated. * These rows are for shared dof that geometrically lie on processor * boundaries but internally are stored on one processor. * Algo: * for each cellbox * RECVs: * i) stretch the cellbox to the variable box * ii) in the appropriate (dof-dependent) direction, take the * boundary and boxman_intersect to extract boxmanentries * that contain these boundary edges. * iii)loop over the boxmanentries and see if they belong * on this proc or another proc * a) if belong on another proc, these are the recvs: * count and prepare the communication buffers and * values. * * SENDs: * i) form layer of cells that is one layer off cellbox * (stretches in the appropriate direction) * ii) boxman_intersect with the cellgrid boxman * iii)loop over the boxmanentries and see if they belong * on this proc or another proc * a) if belong on another proc, these are the sends: * count and prepare the communication buffers and * values. * * Note: For the recv data, the dof can come from only one processor. * For the send data, the dof can go to more than one processor * (the same dof is on the boundary of several cells). 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructSharedDOF_ParcsrMatRowsComm( hypre_SStructGrid *grid, hypre_ParCSRMatrix *A, HYPRE_Int *num_offprocrows_ptr, hypre_MaxwellOffProcRow ***OffProcRows_ptr) { MPI_Comm A_comm= hypre_ParCSRMatrixComm(A); MPI_Comm grid_comm= hypre_SStructGridComm(grid); HYPRE_Int matrix_type= HYPRE_PARCSR; HYPRE_Int nparts= hypre_SStructGridNParts(grid); HYPRE_Int ndim = hypre_SStructGridNDim(grid); hypre_SStructGrid *cell_ssgrid; hypre_SStructPGrid *pgrid; hypre_StructGrid *cellgrid; hypre_BoxArray *cellboxes; hypre_Box *box, *cellbox, vbox, boxman_entry_box; hypre_Index loop_size, start, lindex; HYPRE_Int start_rank, end_rank, rank; HYPRE_Int i, j, k, m, n, t, part, var, nvars; HYPRE_SStructVariable *vartypes; HYPRE_Int nbdry_slabs; hypre_BoxArray *recv_slabs, *send_slabs; hypre_Index varoffset; hypre_BoxManager **boxmans, *cell_boxman; hypre_BoxManEntry **boxman_entries, *entry; HYPRE_Int nboxman_entries; hypre_Index ilower, iupper, index; HYPRE_Int proc, nprocs, myproc; HYPRE_Int *SendToProcs, *RecvFromProcs; HYPRE_Int **send_RowsNcols; /* buffer for rows & ncols */ HYPRE_Int *send_RowsNcols_alloc; HYPRE_Int *send_ColsData_alloc; HYPRE_Int *tot_nsendRowsNcols, *tot_sendColsData; HYPRE_Real **vals; /* buffer for cols & data */ HYPRE_Int *col_inds; HYPRE_Real *values; hypre_MPI_Request *requests; hypre_MPI_Status *status; HYPRE_Int **rbuffer_RowsNcols; HYPRE_Real **rbuffer_ColsData; HYPRE_Int num_sends, num_recvs; hypre_MaxwellOffProcRow **OffProcRows; HYPRE_Int *starts; HYPRE_Int ierr= 0; hypre_BoxInit(&vbox, ndim); hypre_BoxInit(&boxman_entry_box, ndim); hypre_MPI_Comm_rank(A_comm, &myproc); hypre_MPI_Comm_size(grid_comm, &nprocs); start_rank= hypre_ParCSRMatrixFirstRowIndex(A); end_rank = hypre_ParCSRMatrixLastRowIndex(A); /* need a cellgrid boxman to determine the send boxes -> only the cell dofs are unique so a boxman intersect can be used to get the edges that must be sent. 
*/ HYPRE_SStructGridCreate(grid_comm, ndim, nparts, &cell_ssgrid); vartypes= hypre_CTAlloc(HYPRE_SStructVariable, 1); vartypes[0]= HYPRE_SSTRUCT_VARIABLE_CELL; for (i= 0; i< nparts; i++) { pgrid= hypre_SStructGridPGrid(grid, i); cellgrid= hypre_SStructPGridCellSGrid(pgrid); cellboxes= hypre_StructGridBoxes(cellgrid); hypre_ForBoxI(j, cellboxes) { box= hypre_BoxArrayBox(cellboxes, j); HYPRE_SStructGridSetExtents(cell_ssgrid, i, hypre_BoxIMin(box), hypre_BoxIMax(box)); } HYPRE_SStructGridSetVariables(cell_ssgrid, i, 1, vartypes); } HYPRE_SStructGridAssemble(cell_ssgrid); hypre_TFree(vartypes); /* box algebra to determine communication */ SendToProcs = hypre_CTAlloc(HYPRE_Int, nprocs); RecvFromProcs = hypre_CTAlloc(HYPRE_Int, nprocs); send_RowsNcols = hypre_TAlloc(HYPRE_Int *, nprocs); send_RowsNcols_alloc= hypre_TAlloc(HYPRE_Int , nprocs); send_ColsData_alloc = hypre_TAlloc(HYPRE_Int , nprocs); vals = hypre_TAlloc(HYPRE_Real *, nprocs); tot_nsendRowsNcols = hypre_CTAlloc(HYPRE_Int, nprocs); tot_sendColsData = hypre_CTAlloc(HYPRE_Int, nprocs); for (i= 0; i< nprocs; i++) { send_RowsNcols[i]= hypre_TAlloc(HYPRE_Int, 1000); /* initial allocation */ send_RowsNcols_alloc[i]= 1000; vals[i]= hypre_TAlloc(HYPRE_Real, 2000); /* initial allocation */ send_ColsData_alloc[i]= 2000; } for (part= 0; part< nparts; part++) { pgrid= hypre_SStructGridPGrid(grid, part); nvars= hypre_SStructPGridNVars(pgrid); vartypes= hypre_SStructPGridVarTypes(pgrid); cellgrid = hypre_SStructPGridCellSGrid(pgrid); cellboxes= hypre_StructGridBoxes(cellgrid); boxmans= hypre_TAlloc(hypre_BoxManager *, nvars); for (t= 0; t< nvars; t++) { boxmans[t]= hypre_SStructGridBoxManager(grid, part, t); } cell_boxman= hypre_SStructGridBoxManager(cell_ssgrid, part, 0); hypre_ForBoxI(j, cellboxes) { cellbox= hypre_BoxArrayBox(cellboxes, j); for (t= 0; t< nvars; t++) { var= vartypes[t]; hypre_SStructVariableGetOffset((hypre_SStructVariable) var, ndim, varoffset); /* form the variable cellbox */ hypre_CopyBox(cellbox, &vbox); hypre_SubtractIndexes(hypre_BoxIMin(&vbox), varoffset, 3, hypre_BoxIMin(&vbox)); /* boundary layer box depends on variable type */ switch(var) { case 1: /* node based */ { nbdry_slabs= 6; recv_slabs = hypre_BoxArrayCreate(nbdry_slabs, ndim); /* slab in the +/- i,j,k directions */ box= hypre_BoxArrayBox(recv_slabs, 0); hypre_CopyBox(&vbox, box); hypre_BoxIMin(box)[0]= hypre_BoxIMax(box)[0]; box= hypre_BoxArrayBox(recv_slabs, 1); hypre_CopyBox(&vbox, box); hypre_BoxIMax(box)[0]= hypre_BoxIMin(box)[0]; /* need to contract the slab in the i direction to avoid repeated counting of some nodes. */ box= hypre_BoxArrayBox(recv_slabs, 2); hypre_CopyBox(&vbox, box); hypre_BoxIMin(box)[1]= hypre_BoxIMax(box)[1]; hypre_BoxIMin(box)[0]++; /* contract */ hypre_BoxIMax(box)[0]--; /* contract */ box= hypre_BoxArrayBox(recv_slabs, 3); hypre_CopyBox(&vbox, box); hypre_BoxIMax(box)[1]= hypre_BoxIMin(box)[1]; hypre_BoxIMin(box)[0]++; /* contract */ hypre_BoxIMax(box)[0]--; /* contract */ /* need to contract the slab in the i & j directions to avoid repeated counting of some nodes. 
*/ box= hypre_BoxArrayBox(recv_slabs, 4); hypre_CopyBox(&vbox, box); hypre_BoxIMin(box)[2]= hypre_BoxIMax(box)[2]; hypre_BoxIMin(box)[0]++; /* contract */ hypre_BoxIMax(box)[0]--; /* contract */ hypre_BoxIMin(box)[1]++; /* contract */ hypre_BoxIMax(box)[1]--; /* contract */ box= hypre_BoxArrayBox(recv_slabs, 5); hypre_CopyBox(&vbox, box); hypre_BoxIMax(box)[2]= hypre_BoxIMin(box)[2]; hypre_BoxIMin(box)[0]++; /* contract */ hypre_BoxIMax(box)[0]--; /* contract */ hypre_BoxIMin(box)[1]++; /* contract */ hypre_BoxIMax(box)[1]--; /* contract */ /* send boxes are cell-based stretching out of cellbox - i.e., cells that have these edges as boundary */ send_slabs= hypre_BoxArrayCreate(nbdry_slabs, ndim); box= hypre_BoxArrayBox(send_slabs, 0); hypre_CopyBox(cellbox, box); hypre_BoxIMax(box)[0]++; hypre_BoxIMin(box)[0]= hypre_BoxIMax(box)[0]; hypre_BoxIMax(box)[2]++; /* stretch one layer +/- k*/ hypre_BoxIMin(box)[2]--; hypre_BoxIMax(box)[1]++; /* stretch one layer +/- j*/ hypre_BoxIMin(box)[1]--; box= hypre_BoxArrayBox(send_slabs, 1); hypre_CopyBox(cellbox, box); hypre_BoxIMin(box)[0]--; hypre_BoxIMax(box)[0]= hypre_BoxIMin(box)[0]; hypre_BoxIMax(box)[2]++; /* stretch one layer +/- k*/ hypre_BoxIMin(box)[2]--; hypre_BoxIMax(box)[1]++; /* stretch one layer +/- j*/ hypre_BoxIMin(box)[1]--; box= hypre_BoxArrayBox(send_slabs, 2); hypre_CopyBox(cellbox, box); hypre_BoxIMax(box)[1]++; hypre_BoxIMin(box)[1]= hypre_BoxIMax(box)[1]; hypre_BoxIMax(box)[2]++; /* stretch one layer +/- k*/ hypre_BoxIMin(box)[2]--; box= hypre_BoxArrayBox(send_slabs, 3); hypre_CopyBox(cellbox, box); hypre_BoxIMin(box)[1]--; hypre_BoxIMax(box)[1]= hypre_BoxIMin(box)[1]; hypre_BoxIMax(box)[2]++; /* stretch one layer +/- k*/ hypre_BoxIMin(box)[2]--; box= hypre_BoxArrayBox(send_slabs, 4); hypre_CopyBox(cellbox, box); hypre_BoxIMax(box)[2]++; hypre_BoxIMin(box)[2]= hypre_BoxIMax(box)[2]; box= hypre_BoxArrayBox(send_slabs, 5); hypre_CopyBox(cellbox, box); hypre_BoxIMin(box)[2]--; hypre_BoxIMax(box)[2]= hypre_BoxIMin(box)[2]; break; } case 2: /* x-face based */ { nbdry_slabs= 2; recv_slabs= hypre_BoxArrayCreate(nbdry_slabs, ndim); /* slab in the +/- i direction */ box= hypre_BoxArrayBox(recv_slabs, 0); hypre_CopyBox(&vbox, box); hypre_BoxIMin(box)[0]= hypre_BoxIMax(box)[0]; box= hypre_BoxArrayBox(recv_slabs, 1); hypre_CopyBox(&vbox, box); hypre_BoxIMax(box)[0]= hypre_BoxIMin(box)[0]; /* send boxes are cell-based stretching out of cellbox - i.e., cells that have these edges as boundary */ send_slabs= hypre_BoxArrayCreate(nbdry_slabs, ndim); box= hypre_BoxArrayBox(send_slabs, 0); hypre_CopyBox(cellbox, box); hypre_BoxIMax(box)[0]++; hypre_BoxIMin(box)[0]= hypre_BoxIMax(box)[0]; box= hypre_BoxArrayBox(send_slabs, 1); hypre_CopyBox(cellbox, box); hypre_BoxIMin(box)[0]--; hypre_BoxIMax(box)[0]= hypre_BoxIMin(box)[0]; break; } case 3: /* y-face based */ { nbdry_slabs= 2; recv_slabs= hypre_BoxArrayCreate(nbdry_slabs, ndim); /* slab in the +/- j direction */ box= hypre_BoxArrayBox(recv_slabs, 0); hypre_CopyBox(&vbox, box); hypre_BoxIMin(box)[1]= hypre_BoxIMax(box)[1]; box= hypre_BoxArrayBox(recv_slabs, 1); hypre_CopyBox(&vbox, box); hypre_BoxIMax(box)[1]= hypre_BoxIMin(box)[1]; /* send boxes are cell-based stretching out of cellbox - i.e., cells that have these edges as boundary */ send_slabs= hypre_BoxArrayCreate(nbdry_slabs, ndim); box= hypre_BoxArrayBox(send_slabs, 0); hypre_CopyBox(cellbox, box); hypre_BoxIMax(box)[1]++; hypre_BoxIMin(box)[1]= hypre_BoxIMax(box)[1]; box= hypre_BoxArrayBox(send_slabs, 1); hypre_CopyBox(cellbox, box); 
hypre_BoxIMin(box)[1]--; hypre_BoxIMax(box)[1]= hypre_BoxIMin(box)[1]; break; } case 4: /* z-face based */ { nbdry_slabs= 2; recv_slabs= hypre_BoxArrayCreate(nbdry_slabs, ndim); /* slab in the +/- k direction */ box= hypre_BoxArrayBox(recv_slabs, 0); hypre_CopyBox(&vbox, box); hypre_BoxIMin(box)[2]= hypre_BoxIMax(box)[2]; box= hypre_BoxArrayBox(recv_slabs, 1); hypre_CopyBox(&vbox, box); hypre_BoxIMax(box)[2]= hypre_BoxIMin(box)[2]; /* send boxes are cell-based stretching out of cellbox - i.e., cells that have these edges as boundary */ send_slabs= hypre_BoxArrayCreate(nbdry_slabs, ndim); box= hypre_BoxArrayBox(send_slabs, 0); hypre_CopyBox(cellbox, box); hypre_BoxIMax(box)[2]++; hypre_BoxIMin(box)[2]= hypre_BoxIMax(box)[2]; box= hypre_BoxArrayBox(send_slabs, 1); hypre_CopyBox(cellbox, box); hypre_BoxIMin(box)[2]--; hypre_BoxIMax(box)[2]= hypre_BoxIMin(box)[2]; break; } case 5: /* x-edge based */ { nbdry_slabs= 4; recv_slabs= hypre_BoxArrayCreate(nbdry_slabs, ndim); /* slab in the +/- j & k direction */ box= hypre_BoxArrayBox(recv_slabs, 0); hypre_CopyBox(&vbox, box); hypre_BoxIMin(box)[1]= hypre_BoxIMax(box)[1]; box= hypre_BoxArrayBox(recv_slabs, 1); hypre_CopyBox(&vbox, box); hypre_BoxIMax(box)[1]= hypre_BoxIMin(box)[1]; /* need to contract the slab in the j direction to avoid repeated counting of some x-edges. */ box= hypre_BoxArrayBox(recv_slabs, 2); hypre_CopyBox(&vbox, box); hypre_BoxIMin(box)[2]= hypre_BoxIMax(box)[2]; hypre_BoxIMin(box)[1]++; /* contract */ hypre_BoxIMax(box)[1]--; /* contract */ box= hypre_BoxArrayBox(recv_slabs, 3); hypre_CopyBox(&vbox, box); hypre_BoxIMax(box)[2]= hypre_BoxIMin(box)[2]; hypre_BoxIMin(box)[1]++; /* contract */ hypre_BoxIMax(box)[1]--; /* contract */ /* send boxes are cell-based stretching out of cellbox - i.e., cells that have these edges as boundary */ send_slabs= hypre_BoxArrayCreate(nbdry_slabs, ndim); box= hypre_BoxArrayBox(send_slabs, 0); hypre_CopyBox(cellbox, box); hypre_BoxIMax(box)[1]++; hypre_BoxIMin(box)[1]= hypre_BoxIMax(box)[1]; hypre_BoxIMax(box)[2]++; /* stretch one layer +/- k*/ hypre_BoxIMin(box)[2]--; box= hypre_BoxArrayBox(send_slabs, 1); hypre_CopyBox(cellbox, box); hypre_BoxIMin(box)[1]--; hypre_BoxIMax(box)[1]= hypre_BoxIMin(box)[1]; hypre_BoxIMax(box)[2]++; /* stretch one layer +/- k*/ hypre_BoxIMin(box)[2]--; box= hypre_BoxArrayBox(send_slabs, 2); hypre_CopyBox(cellbox, box); hypre_BoxIMax(box)[2]++; hypre_BoxIMin(box)[2]= hypre_BoxIMax(box)[2]; box= hypre_BoxArrayBox(send_slabs, 3); hypre_CopyBox(cellbox, box); hypre_BoxIMin(box)[2]--; hypre_BoxIMax(box)[2]= hypre_BoxIMin(box)[2]; break; } case 6: /* y-edge based */ { nbdry_slabs= 4; recv_slabs= hypre_BoxArrayCreate(nbdry_slabs, ndim); /* slab in the +/- i & k direction */ box= hypre_BoxArrayBox(recv_slabs, 0); hypre_CopyBox(&vbox, box); hypre_BoxIMin(box)[0]= hypre_BoxIMax(box)[0]; box= hypre_BoxArrayBox(recv_slabs, 1); hypre_CopyBox(&vbox, box); hypre_BoxIMax(box)[0]= hypre_BoxIMin(box)[0]; /* need to contract the slab in the i direction to avoid repeated counting of some y-edges. 
*/ box= hypre_BoxArrayBox(recv_slabs, 2); hypre_CopyBox(&vbox, box); hypre_BoxIMin(box)[2]= hypre_BoxIMax(box)[2]; hypre_BoxIMin(box)[0]++; /* contract */ hypre_BoxIMax(box)[0]--; /* contract */ box= hypre_BoxArrayBox(recv_slabs, 3); hypre_CopyBox(&vbox, box); hypre_BoxIMax(box)[2]= hypre_BoxIMin(box)[2]; hypre_BoxIMin(box)[0]++; /* contract */ hypre_BoxIMax(box)[0]--; /* contract */ /* send boxes are cell-based stretching out of cellbox - i.e., cells that have these edges as boundary */ send_slabs= hypre_BoxArrayCreate(nbdry_slabs, ndim); box= hypre_BoxArrayBox(send_slabs, 0); hypre_CopyBox(cellbox, box); hypre_BoxIMax(box)[0]++; hypre_BoxIMin(box)[0]= hypre_BoxIMax(box)[0]; hypre_BoxIMax(box)[2]++; /* stretch one layer +/- k*/ hypre_BoxIMin(box)[2]--; box= hypre_BoxArrayBox(send_slabs, 1); hypre_CopyBox(cellbox, box); hypre_BoxIMin(box)[0]--; hypre_BoxIMax(box)[0]= hypre_BoxIMin(box)[0]; hypre_BoxIMax(box)[2]++; /* stretch one layer +/- k*/ hypre_BoxIMin(box)[2]--; box= hypre_BoxArrayBox(send_slabs, 2); hypre_CopyBox(cellbox, box); hypre_BoxIMax(box)[2]++; hypre_BoxIMin(box)[2]= hypre_BoxIMax(box)[2]; box= hypre_BoxArrayBox(send_slabs, 3); hypre_CopyBox(cellbox, box); hypre_BoxIMin(box)[2]--; hypre_BoxIMax(box)[2]= hypre_BoxIMin(box)[2]; break; } case 7: /* z-edge based */ { nbdry_slabs= 4; recv_slabs= hypre_BoxArrayCreate(nbdry_slabs, ndim); /* slab in the +/- i & j direction */ box= hypre_BoxArrayBox(recv_slabs, 0); hypre_CopyBox(&vbox, box); hypre_BoxIMin(box)[0]= hypre_BoxIMax(box)[0]; box= hypre_BoxArrayBox(recv_slabs, 1); hypre_CopyBox(&vbox, box); hypre_BoxIMax(box)[0]= hypre_BoxIMin(box)[0]; /* need to contract the slab in the i direction to avoid repeated counting of some z-edges. */ box= hypre_BoxArrayBox(recv_slabs, 2); hypre_CopyBox(&vbox, box); hypre_BoxIMin(box)[1]= hypre_BoxIMax(box)[1]; hypre_BoxIMin(box)[0]++; /* contract */ hypre_BoxIMax(box)[0]--; /* contract */ box= hypre_BoxArrayBox(recv_slabs, 3); hypre_CopyBox(&vbox, box); hypre_BoxIMax(box)[1]= hypre_BoxIMin(box)[1]; hypre_BoxIMin(box)[0]++; /* contract */ hypre_BoxIMax(box)[0]--; /* contract */ /* send boxes are cell-based stretching out of cellbox - i.e., cells that have these edges as boundary */ send_slabs= hypre_BoxArrayCreate(nbdry_slabs, ndim); box= hypre_BoxArrayBox(send_slabs, 0); hypre_CopyBox(cellbox, box); hypre_BoxIMax(box)[1]++; hypre_BoxIMin(box)[1]= hypre_BoxIMax(box)[1]; hypre_BoxIMax(box)[0]++; /* stretch one layer +/- i*/ hypre_BoxIMin(box)[0]--; box= hypre_BoxArrayBox(send_slabs, 1); hypre_CopyBox(cellbox, box); hypre_BoxIMin(box)[1]--; hypre_BoxIMax(box)[1]= hypre_BoxIMin(box)[1]; hypre_BoxIMax(box)[0]++; /* stretch one layer +/- i*/ hypre_BoxIMin(box)[0]--; box= hypre_BoxArrayBox(send_slabs, 2); hypre_CopyBox(cellbox, box); hypre_BoxIMax(box)[0]++; hypre_BoxIMin(box)[0]= hypre_BoxIMax(box)[0]; box= hypre_BoxArrayBox(send_slabs, 3); hypre_CopyBox(cellbox, box); hypre_BoxIMin(box)[0]--; hypre_BoxIMax(box)[0]= hypre_BoxIMin(box)[0]; break; } } /* switch(var) */ /* determine no. 
of recv rows */ for (i= 0; i< nbdry_slabs; i++) { box= hypre_BoxArrayBox(recv_slabs, i); hypre_BoxManIntersect(boxmans[t], hypre_BoxIMin(box), hypre_BoxIMax(box), &boxman_entries, &nboxman_entries); for (m= 0; m< nboxman_entries; m++) { hypre_SStructBoxManEntryGetProcess(boxman_entries[m], &proc); if (proc != myproc) { hypre_BoxManEntryGetExtents(boxman_entries[m], ilower, iupper); hypre_BoxSetExtents(&boxman_entry_box, ilower, iupper); hypre_IntersectBoxes(&boxman_entry_box, box, &boxman_entry_box); RecvFromProcs[proc]+= hypre_BoxVolume(&boxman_entry_box); } } hypre_TFree(boxman_entries); /* determine send rows. Note the cell_boxman */ box= hypre_BoxArrayBox(send_slabs, i); hypre_BoxManIntersect(cell_boxman, hypre_BoxIMin(box), hypre_BoxIMax(box), &boxman_entries, &nboxman_entries); for (m= 0; m< nboxman_entries; m++) { hypre_SStructBoxManEntryGetProcess(boxman_entries[m], &proc); if (proc != myproc) { hypre_BoxManEntryGetExtents(boxman_entries[m], ilower, iupper); hypre_BoxSetExtents(&boxman_entry_box, ilower, iupper); hypre_IntersectBoxes(&boxman_entry_box, box, &boxman_entry_box); /* not correct box piece right now. Need to determine the correct var box - extend to var_box and then intersect with vbox */ hypre_SubtractIndexes(hypre_BoxIMin(&boxman_entry_box), varoffset, 3, hypre_BoxIMin(&boxman_entry_box)); hypre_IntersectBoxes(&boxman_entry_box, &vbox, &boxman_entry_box); SendToProcs[proc]+= 2*hypre_BoxVolume(&boxman_entry_box); /* check to see if sufficient memory allocation for send_rows */ if (SendToProcs[proc] > send_RowsNcols_alloc[proc]) { send_RowsNcols_alloc[proc]= SendToProcs[proc]; send_RowsNcols[proc]= hypre_TReAlloc(send_RowsNcols[proc], HYPRE_Int, send_RowsNcols_alloc[proc]); } hypre_BoxGetSize(&boxman_entry_box, loop_size); hypre_CopyIndex(hypre_BoxIMin(&boxman_entry_box), start); hypre_BoxLoop0Begin(ndim, loop_size); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,lindex,index,entry,rank,tot_nsendRowsNcols,n,col_inds,values,send_ColsData_alloc,k,tot_sendColsData) HYPRE_SMP_SCHEDULE #endif #else hypre_BoxLoopSetOneBlock(); #endif hypre_BoxLoop0For() { hypre_BoxLoopGetIndex(lindex); hypre_SetIndex3(index, lindex[0], lindex[1], lindex[2]); hypre_AddIndexes(index, start, 3, index); hypre_SStructGridFindBoxManEntry(grid, part, index, t, &entry); if (entry) { hypre_SStructBoxManEntryGetGlobalRank(entry, index, &rank, matrix_type); /* index may still be off myproc because vbox was formed by expanding the cellbox to the variable box without checking (difficult) the whole expanded box is on myproc */ if (rank <= end_rank && rank >= start_rank) { send_RowsNcols[proc][tot_nsendRowsNcols[proc]]= rank; tot_nsendRowsNcols[proc]++; HYPRE_ParCSRMatrixGetRow((HYPRE_ParCSRMatrix) A, rank, &n, &col_inds, &values); send_RowsNcols[proc][tot_nsendRowsNcols[proc]]= n; tot_nsendRowsNcols[proc]++; /* check if sufficient memory allocation in the data arrays */ if ( (tot_sendColsData[proc]+2*n) > send_ColsData_alloc[proc] ) { send_ColsData_alloc[proc]+= 2000; vals[proc]= hypre_TReAlloc(vals[proc], HYPRE_Real, send_ColsData_alloc[proc]); } for (k= 0; k< n; k++) { vals[proc][tot_sendColsData[proc]]= (HYPRE_Real) col_inds[k]; tot_sendColsData[proc]++; vals[proc][tot_sendColsData[proc]]= values[k]; tot_sendColsData[proc]++; } HYPRE_ParCSRMatrixRestoreRow((HYPRE_ParCSRMatrix) A, rank, &n, &col_inds, &values); } /* if (rank <= end_rank && rank >= start_rank) */ } /* if (entry) */ } hypre_BoxLoop0End(); } /* if (proc != myproc) */ } /* for (m= 0; m< nboxman_entries; 
m++) */ hypre_TFree(boxman_entries); } /* for (i= 0; i< nbdry_slabs; i++) */ hypre_BoxArrayDestroy(send_slabs); hypre_BoxArrayDestroy(recv_slabs); } /* for (t= 0; t< nvars; t++) */ } /* hypre_ForBoxI(j, cellboxes) */ hypre_TFree(boxmans); } /* for (part= 0; part< nparts; part++) */ HYPRE_SStructGridDestroy(cell_ssgrid); num_sends= 0; num_recvs= 0; k= 0; starts= hypre_CTAlloc(HYPRE_Int, nprocs+1); for (i= 0; i< nprocs; i++) { starts[i+1]= starts[i]+RecvFromProcs[i]; if (RecvFromProcs[i]) { num_recvs++; k+= RecvFromProcs[i]; } if (tot_sendColsData[i]) { num_sends++; } } OffProcRows= hypre_TAlloc(hypre_MaxwellOffProcRow *, k); *num_offprocrows_ptr= k; requests= hypre_CTAlloc(hypre_MPI_Request, num_sends+num_recvs); status = hypre_CTAlloc(hypre_MPI_Status, num_sends+num_recvs); /* send row size data */ j= 0; rbuffer_RowsNcols= hypre_TAlloc(HYPRE_Int *, nprocs); rbuffer_ColsData = hypre_TAlloc(HYPRE_Real *, nprocs); for (proc= 0; proc< nprocs; proc++) { if (RecvFromProcs[proc]) { rbuffer_RowsNcols[proc]= hypre_TAlloc(HYPRE_Int, 2*RecvFromProcs[proc]); hypre_MPI_Irecv(rbuffer_RowsNcols[proc], 2*RecvFromProcs[proc], HYPRE_MPI_INT, proc, 0, grid_comm, &requests[j++]); } /* if (RecvFromProcs[proc]) */ } /* for (proc= 0; proc< nprocs; proc++) */ for (proc= 0; proc< nprocs; proc++) { if (tot_nsendRowsNcols[proc]) { hypre_MPI_Isend(send_RowsNcols[proc], tot_nsendRowsNcols[proc], HYPRE_MPI_INT, proc, 0, grid_comm, &requests[j++]); } } hypre_MPI_Waitall(j, requests, status); /* unpack data */ for (proc= 0; proc< nprocs; proc++) { send_RowsNcols_alloc[proc]= 0; if (RecvFromProcs[proc]) { m= 0; ; for (i= 0; i< RecvFromProcs[proc]; i++) { /* rbuffer_RowsNcols[m] has the row & rbuffer_RowsNcols[m+1] the col size */ OffProcRows[starts[proc]+i]= hypre_MaxwellOffProcRowCreate(rbuffer_RowsNcols[proc][m+1]); (OffProcRows[starts[proc]+i] -> row) = rbuffer_RowsNcols[proc][m]; (OffProcRows[starts[proc]+i] -> ncols)= rbuffer_RowsNcols[proc][m+1]; send_RowsNcols_alloc[proc]+= rbuffer_RowsNcols[proc][m+1]; m+= 2; } rbuffer_ColsData[proc]= hypre_TAlloc(HYPRE_Real, 2*send_RowsNcols_alloc[proc]); hypre_TFree(rbuffer_RowsNcols[proc]); } } hypre_TFree(rbuffer_RowsNcols); hypre_TFree(requests); hypre_TFree(status); requests= hypre_CTAlloc(hypre_MPI_Request, num_sends+num_recvs); status = hypre_CTAlloc(hypre_MPI_Status, num_sends+num_recvs); /* send row data */ j= 0; for (proc= 0; proc< nprocs; proc++) { if (RecvFromProcs[proc]) { hypre_MPI_Irecv(rbuffer_ColsData[proc], 2*send_RowsNcols_alloc[proc], HYPRE_MPI_REAL, proc, 1, grid_comm, &requests[j++]); } /* if (RecvFromProcs[proc]) */ } /* for (proc= 0; proc< nprocs; proc++) */ for (proc= 0; proc< nprocs; proc++) { if (tot_sendColsData[proc]) { hypre_MPI_Isend(vals[proc], tot_sendColsData[proc], HYPRE_MPI_REAL, proc, 1, grid_comm, &requests[j++]); } } hypre_MPI_Waitall(j, requests, status); /* unpack data */ for (proc= 0; proc< nprocs; proc++) { if (RecvFromProcs[proc]) { k= 0; for (i= 0; i< RecvFromProcs[proc]; i++) { col_inds= (OffProcRows[starts[proc]+i] -> cols); values = (OffProcRows[starts[proc]+i] -> data); m = (OffProcRows[starts[proc]+i] -> ncols); for (t= 0; t< m; t++) { col_inds[t]= (HYPRE_Int) rbuffer_ColsData[proc][k++]; values[t] = rbuffer_ColsData[proc][k++]; } } hypre_TFree(rbuffer_ColsData[proc]); } /* if (RecvFromProcs[proc]) */ } /* for (proc= 0; proc< nprocs; proc++) */ hypre_TFree(rbuffer_ColsData); hypre_TFree(requests); hypre_TFree(status); for (proc= 0; proc< nprocs; proc++) { hypre_TFree(send_RowsNcols[proc]); hypre_TFree(vals[proc]); } 
hypre_TFree(send_RowsNcols); hypre_TFree(vals); hypre_TFree(tot_sendColsData); hypre_TFree(tot_nsendRowsNcols); hypre_TFree(send_ColsData_alloc); hypre_TFree(send_RowsNcols_alloc); hypre_TFree(SendToProcs); hypre_TFree(RecvFromProcs); hypre_TFree(starts); *OffProcRows_ptr= OffProcRows; return ierr; }
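/*
 * Minimal sketch of the two-phase exchange used above (illustration only,
 * written against plain MPI rather than the hypre_MPI wrappers, and with a
 * fixed-size request array as a simplifying assumption): first post
 * nonblocking receives/sends for the (row, ncols) size pairs and wait, then
 * size the data buffers from what arrived and repeat the same pattern for
 * the column indices and values.
 */
#include <mpi.h>

static void exchange_sizes_then_data(MPI_Comm comm, int nprocs,
                                     int *nrecv,       /* rows expected per proc */
                                     int **size_bufs,  /* per-proc (row, ncols) pairs */
                                     int *nsend, int **send_sizes)
{
  MPI_Request req[2 * 64];  /* sketch assumes nprocs <= 64 */
  int j = 0;
  for (int p = 0; p < nprocs; p++)
    if (nrecv[p])
      MPI_Irecv(size_bufs[p], 2 * nrecv[p], MPI_INT, p, 0, comm, &req[j++]);
  for (int p = 0; p < nprocs; p++)
    if (nsend[p])
      MPI_Isend(send_sizes[p], nsend[p], MPI_INT, p, 0, comm, &req[j++]);
  MPI_Waitall(j, req, MPI_STATUSES_IGNORE);
  /* ...allocate value buffers from the received ncols, then run the same
     Irecv/Isend/Waitall pattern again with tag 1 for (col, value) data... */
}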
LAGraph_matrix_extract_keep_dimensions.c
//------------------------------------------------------------------------------
// LAGraph_matrix_extract_keep_dimensions: extract submatrix but keep the
// dimensions of the original matrix
//------------------------------------------------------------------------------

/*
    LAGraph:  graph algorithms based on GraphBLAS

    Copyright 2019 LAGraph Contributors.

    (see Contributors.txt for a full list of Contributors; see
    ContributionInstructions.txt for information on how you can Contribute to
    this project).

    All Rights Reserved.

    NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH
    CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
    AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR
    PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF
    THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH
    RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.

    Released under a BSD license, please see the LICENSE file distributed with
    this Software or contact permission@sei.cmu.edu for full terms.

    Created, in part, with funding and support from the United States
    Government.  (see Acknowledgments.txt file).

    This program includes and/or can make use of certain third party source
    code, object code, documentation and other files ("Third Party Software").
    See LICENSE file for more details.
*/

//------------------------------------------------------------------------------

// LAGraph_Matrix_extract_keep_dimensions: Contributed by Gabor Szarnyas,
// Budapest University of Technology and Economics
// (with accented characters: G\'{a}bor Sz\'{a}rnyas).

// Compute the submatrix of A induced by the given set of vertices, while
// keeping the dimensions of the original matrix.

#include "LAGraph_internal.h"

#define LAGRAPH_FREE_ALL  \
{                         \
    LAGRAPH_FREE (C) ;    \
    LAGRAPH_FREE (type) ; \
}

typedef struct
{
    const GrB_Index nv ;    // number of vertices
    const bool *Vdense ;    // array denoting whether a vertex should be kept
}
Vdense_struct_type ;

bool select_submatrix_elements_fun (const GrB_Index i, const GrB_Index j,
    const void *x, const void *thunk) ;

bool select_submatrix_elements_fun (const GrB_Index i, const GrB_Index j,
    const void *x, const void *thunk)
{
    Vdense_struct_type *indices = (Vdense_struct_type *) (thunk) ;
    return indices->Vdense [i] && indices->Vdense [j] ;
}

//------------------------------------------------------------------------------

GrB_Info LAGraph_Matrix_extract_keep_dimensions // extract submatrix but keep
                                                // the dimensions of the
                                                // original matrix
(
    GrB_Matrix *Chandle,        // output matrix
    const GrB_Matrix A,         // input matrix
    const GrB_Index *Vsparse,   // sorted list of vertex indices
    const bool *Vdense,         // boolean array of vertices
    GrB_Index nv                // number of vertex indices
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;
    GrB_Type type ;
    GrB_Index n ;
    GrB_Matrix C = NULL ;

    LAGr_Matrix_type (&type, A)
    LAGr_Matrix_nrows (&n, A)
    LAGr_Matrix_new (&C, type, n, n)

    if (Vsparse == NULL && Vdense == NULL)
    {
        LAGRAPH_ERROR ("Both Vsparse and Vdense are set to NULL",
            GrB_NULL_POINTER)
    }

    if (Vsparse == NULL) // use Vdense and GxB_select
    {
        Vdense_struct_type vdense_struct = {.nv = nv, .Vdense = Vdense} ;

        GrB_Type Vdense_type ;
        LAGr_Type_new (&Vdense_type, sizeof (vdense_struct))

        GxB_Scalar vdense_thunk ;
        LAGr_Scalar_new (&vdense_thunk, Vdense_type)
        LAGr_Scalar_setElement (vdense_thunk, (void *) &vdense_struct)

        GxB_SelectOp select_submatrix_elements_op ;
        LAGr_SelectOp_new (&select_submatrix_elements_op,
            select_submatrix_elements_fun, NULL, Vdense_type)
        LAGr_select (C, NULL, NULL, select_submatrix_elements_op, A,
            vdense_thunk, NULL)

        // free the GraphBLAS objects created for the select
        GrB_free (&select_submatrix_elements_op) ;
        GrB_free (&vdense_thunk) ;
        GrB_free (&Vdense_type) ;
    }
    else
    {
        GrB_Matrix D ;  // diagonal matrix used to select rows/columns
        LAGr_Matrix_new (&D, GrB_BOOL, n, n)

        bool *X = LAGraph_malloc (nv, sizeof (bool)) ;
        if (X == NULL)
        {
            LAGRAPH_ERROR ("out of memory", GrB_OUT_OF_MEMORY)
        }
        int nthreads = LAGraph_get_nthreads ( ) ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (GrB_Index i = 0 ; i < nv ; i++)
        {
            X [i] = true ;
        }
        LAGr_Matrix_build (D, Vsparse, Vsparse, X, nv, GrB_LOR)
        LAGRAPH_FREE (X) ;

        GxB_Format_Value A_format ;
        LAGRAPH_OK (GxB_get (A, GxB_FORMAT, &A_format))
        if (A_format == GxB_BY_ROW) // C = (D*A)*D
        {
            LAGr_mxm (C, NULL, NULL, GxB_ANY_SECOND_FP64, D, A, NULL)
            LAGr_mxm (C, NULL, NULL, GxB_ANY_FIRST_FP64, C, D, NULL)
        }
        else // A_format == GxB_BY_COL: C = D*(A*D)
        {
            LAGr_mxm (C, NULL, NULL, GxB_ANY_FIRST_FP64, A, D, NULL)
            LAGr_mxm (C, NULL, NULL, GxB_ANY_SECOND_FP64, D, C, NULL)
        }
        GrB_free (&D) ;
    }

    (*Chandle) = C ;
    return (GrB_SUCCESS) ;
}
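/*
 * Why D*A*D selects a submatrix in place (illustrative sketch, not part of
 * the original file): with d[i] = 1 when vertex i is kept and 0 otherwise,
 * (D*A*D)[i][j] = d[i] * A[i][j] * d[j], so entries outside the kept rows
 * and columns vanish while the result stays n-by-n. The same identity in
 * plain dense arrays:
 */
static void diag_select_dense(int n, const double *A, const int *d, double *C)
{
  for (int i = 0; i < n; i++)
    for (int j = 0; j < n; j++)
      C[i * n + j] = (d[i] && d[j]) ? A[i * n + j] : 0.0;
}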
test_tasks.c
//===-- test_tasks.c - Test task creation and execution ----------*- C -*-===//
//
// Part of the LOMP project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include <stdio.h>
#include <stdlib.h>

#define NTASKS 33

int main(void) {
  int count = 0;
#pragma omp parallel shared(count)
#pragma omp master
  {
    for (int i = 0; i < NTASKS; i++) {
#pragma omp task shared(count)
      {
#pragma omp atomic
        count++;
      }
    }
  }
  int failed = (count != NTASKS);
  printf("Got %d tasks, should be %d\n"
         "***%s***\n",
         count, NTASKS, failed ? "FAILED" : "PASSED");
  return failed ? EXIT_FAILURE : EXIT_SUCCESS;
}
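/*
 * Alternative sketch (assumes an OpenMP 5.0 runtime, not part of the test
 * above): the same count can be accumulated without the atomic by using a
 * task reduction, which gives each task a private copy that the enclosing
 * taskgroup combines when it completes.
 */
int count_tasks_with_reduction(int ntasks) {
  int count = 0;
#pragma omp parallel shared(count)
#pragma omp master
#pragma omp taskgroup task_reduction(+ : count)
  for (int i = 0; i < ntasks; i++) {
#pragma omp task in_reduction(+ : count)
    count++;  // private per task, combined at the end of the taskgroup
  }
  return count;
}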
core_dzamax.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> c d s
 *
 **/

#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"

#include <math.h>

/******************************************************************************/
void plasma_core_omp_dzamax(int colrow, int m, int n,
                            const plasma_complex64_t *A, int lda,
                            double *values,
                            plasma_sequence_t *sequence,
                            plasma_request_t *request)
{
    switch (colrow) {
    case PlasmaColumnwise:
        #pragma omp task depend(in:A[0:lda*n]) \
                         depend(out:values[0:n])
        {
            if (sequence->status == PlasmaSuccess) {
                for (int j = 0; j < n; j++) {
                    values[j] = plasma_core_dcabs1(A[lda*j]);
                    for (int i = 1; i < m; i++) {
                        double tmp = plasma_core_dcabs1(A[lda*j+i]);
                        if (tmp > values[j])
                            values[j] = tmp;
                    }
                }
            }
        }
        break;
    case PlasmaRowwise:
        #pragma omp task depend(in:A[0:lda*n]) \
                         depend(out:values[0:m])
        {
            if (sequence->status == PlasmaSuccess) {
                for (int i = 0; i < m; i++)
                    values[i] = plasma_core_dcabs1(A[i]);
                for (int j = 1; j < n; j++) {
                    for (int i = 0; i < m; i++) {
                        double tmp = plasma_core_dcabs1(A[lda*j+i]);
                        if (tmp > values[i])
                            values[i] = tmp;
                    }
                }
            }
        }
        break;
    }
}
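/*
 * Illustrative sketch (not from PLASMA): the depend(in:...) / depend(out:...)
 * clauses above let the runtime order tasks by data flow. In this minimal
 * example the second task reads what the first writes, so it is guaranteed
 * to run after it even though both are submitted without any explicit
 * synchronization.
 */
#include <stdio.h>

void depend_chain_demo(void) {
  double x = 0.0;
#pragma omp parallel
#pragma omp single
  {
#pragma omp task depend(out : x)
    x = 42.0;  // producer

#pragma omp task depend(in : x)
    printf("consumer sees x = %g\n", x);  // runs after the producer
  }  // implicit barrier waits for both tasks
}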
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <memory> #include <string> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class 
FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. 
  llvm::DenseMap<FileID, FileNullability> Map;

  /// A single-element cache based on the file ID.
  struct {
    FileID File;
    FileNullability Nullability;
  } Cache;

public:
  FileNullability &operator[](FileID file) {
    // Check the single-element cache.
    if (file == Cache.File)
      return Cache.Nullability;

    // It's not in the single-element cache; flush the cache if we have one.
    if (!Cache.File.isInvalid()) {
      Map[Cache.File] = Cache.Nullability;
    }

    // Pull this entry into the cache.
    Cache.File = file;
    Cache.Nullability = Map[file];
    return Cache.Nullability;
  }
};

/// Keeps track of expected type during expression parsing. The type is tied to
/// a particular token, all functions that update or consume the type take a
/// start location of the token they are looking at as a parameter. This allows
/// us to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
  PreferredTypeBuilder() = default;
  explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}

  void enterCondition(Sema &S, SourceLocation Tok);
  void enterReturn(Sema &S, SourceLocation Tok);
  void enterVariableInit(SourceLocation Tok, Decl *D);
  /// Computing a type for the function argument may require running
  /// overloading, so we postpone its computation until it is actually needed.
  ///
  /// Clients should be very careful when using this function, as it stores a
  /// function_ref, clients should make sure all calls to get() with the same
  /// location happen while function_ref is alive.
  void enterFunctionArgument(SourceLocation Tok,
                             llvm::function_ref<QualType()> ComputeType);

  void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
  void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
                  SourceLocation OpLoc);
  void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
  void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
  void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
  /// Handles all type casts, including C-style cast, C++ casts, etc.
  void enterTypeCast(SourceLocation Tok, QualType CastType);

  QualType get(SourceLocation Tok) const {
    if (Tok != ExpectedLoc)
      return QualType();
    if (!Type.isNull())
      return Type;
    if (ComputeType)
      return ComputeType();
    return QualType();
  }

private:
  /// Start position of a token for which we store expected type.
  SourceLocation ExpectedLoc;
  /// Expected type for a token starting at ExpectedLoc.
  QualType Type;
  /// A function to compute expected type at ExpectedLoc. It is only considered
  /// if Type is null.
  llvm::function_ref<QualType()> ComputeType;
};

/// Sema - This implements semantic analysis and AST building for C.
class Sema {
  Sema(const Sema &) = delete;
  void operator=(const Sema &) = delete;

  /// Source of additional semantic information.
  ExternalSemaSource *ExternalSource;

  /// Whether Sema has generated a multiplexer and has to delete it.
  bool isMultiplexExternalSource;

  static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);

  bool isVisibleSlow(const NamedDecl *D);

  /// Determine whether two declarations should be linked together, given that
  /// the old declaration might not be visible and the new declaration might
  /// not have external linkage.
  bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
                                    const NamedDecl *New) {
    if (isVisible(Old))
      return true;
    // See comment in below overload for why it's safe to compute the linkage
    // of the new declaration here.
if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// Holds TypoExprs that are created from `createDelayedTypo`. This is used by /// `TransformTypos` in order to keep track of any TypoExprs that are created /// recursively during typo correction and wipe them away if the correction /// fails. llvm::SmallVector<TypoExpr *, 2> TypoExprs; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! 
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispAttr::Mode> VtorDispStack; // #pragma pack. // Sentinel to represent when the stack is set to mac68k alignment. static const unsigned kMac68kAlignmentSentinel = ~0U; PragmaStack<unsigned> PackStack; // The current #pragma pack values and locations at each #include. struct PackIncludeState { unsigned CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<PackIncludeState, 8> PackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This an attribute introduced by \#pragma clang attribute. 
struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. 
Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. 
sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// Used to change context to isConstantEvaluated without pushing a heavy /// ExpressionEvaluationContextRecord object. bool isConstantEvaluatedOverride; bool isConstantEvaluated() { return ExprEvalContexts.back().isConstantEvaluated() || isConstantEvaluatedOverride; } /// RAII object to handle the state changes required to synthesize /// a function body. class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. 
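  /// For example (illustrative):
  /// \code
  ///   #pragma weak flockfile          // weak, possibly still undeclared
  ///   #pragma weak _exit = exit       // weak alias of another identifier
  ///   #pragma redefine_extname setjmp __setjmp
  /// \endcode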
Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library resides. NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \<coroutine_traits> ClassTemplateDecl *StdCoroutineTraitsCache; /// The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// id<NSCopying> type. QualType QIDNSCopying; /// will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// A flag to indicate that we're in a context that permits abstract /// references to fields. 
This is really a bool AllowAbstractFieldReference; /// Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. enum class ExpressionEvaluationContext { /// The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// The current expression occurs within a braced-init-list within /// an unevaluated operand. This is mostly like a regular unevaluated /// context, except that we still instantiate constexpr functions that are /// referenced here so that we can perform narrowing checks correctly. UnevaluatedList, /// The current expression occurs within a discarded statement. /// This behaves largely similarly to an unevaluated operand in preventing /// definitions from being required, but not in other ways. DiscardedStatement, /// The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; /// Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// The expression evaluation context. ExpressionEvaluationContext Context; /// Whether the enclosing context needed a cleanup. CleanupInfo ParentCleanup; /// Whether we are in a decltype expression. bool IsDecltype; /// The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; MaybeODRUseExprSet SavedMaybeODRUseExprs; /// The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// The context information used to mangle lambda expressions /// and block literals within this context. /// /// This mangling information is allocated lazily, since most contexts /// do not have lambda expressions or block literals. std::unique_ptr<MangleNumberingContext> MangleNumbering; /// If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. 
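  /// For instance (illustrative):
  /// \code
  ///   struct Incomplete;
  ///   Incomplete get();           // return type may stay incomplete
  ///   using T = decltype(get());  // completeness check is deferred
  /// \endcode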
    SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

    /// If we are processing a decltype type, a set of temporary binding
    /// expressions for which we have deferred checking the destructor.
    SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

    llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;

    /// Describes whether we are in an expression context which we have
    /// to handle differently.
    enum ExpressionKind {
      EK_Decltype, EK_TemplateArgument, EK_Other
    } ExprContext;

    ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                      unsigned NumCleanupObjects,
                                      CleanupInfo ParentCleanup,
                                      Decl *ManglingContextDecl,
                                      ExpressionKind ExprContext)
        : Context(Context), ParentCleanup(ParentCleanup),
          NumCleanupObjects(NumCleanupObjects), NumTypos(0),
          ManglingContextDecl(ManglingContextDecl), MangleNumbering(),
          ExprContext(ExprContext) {}

    /// Retrieve the mangling numbering context, used to consistently
    /// number constructs like lambdas for mangling.
    MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);

    bool isUnevaluated() const {
      return Context == ExpressionEvaluationContext::Unevaluated ||
             Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
             Context == ExpressionEvaluationContext::UnevaluatedList;
    }

    bool isConstantEvaluated() const {
      return Context == ExpressionEvaluationContext::ConstantEvaluated;
    }
  };

  /// A stack of expression evaluation contexts.
  SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;

  /// Emit a warning for all pending noderef expressions that we recorded.
  void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);

  /// Compute the mangling number context for a lambda expression or
  /// block literal.
  ///
  /// \param DC - The DeclContext containing the lambda expression or
  /// block literal.
  /// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
  /// associated with the context, if relevant.
  MangleNumberingContext *getCurrentMangleNumberContext(
      const DeclContext *DC, Decl *&ManglingContextDecl);

  /// SpecialMemberOverloadResult - The overloading result for a special member
  /// function.
  ///
  /// This is basically a wrapper around PointerIntPair. The lowest bits of the
  /// integer are used to determine whether overload resolution succeeded.
  class SpecialMemberOverloadResult {
  public:
    enum Kind {
      NoMemberOrDeleted,
      Ambiguous,
      Success
    };

  private:
    llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;

  public:
    SpecialMemberOverloadResult() : Pair() {}
    SpecialMemberOverloadResult(CXXMethodDecl *MD)
        : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}

    CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
    void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }

    Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
    void setKind(Kind K) { Pair.setInt(K); }
  };

  class SpecialMemberOverloadResultEntry
      : public llvm::FastFoldingSetNode,
        public SpecialMemberOverloadResult {
  public:
    SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
        : FastFoldingSetNode(ID) {}
  };

  /// A cache of special member function overload resolution results
  /// for C++ records.
  llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;

  /// A cache of the flags available in enumerations with the flag_bits
  /// attribute.
  mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;

  /// The kind of translation unit we are processing.
/// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// List of SourceLocations where 'self' is implicitly retained inside a /// block. llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1> ImplicitlyRetainedSelfLocs; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; /// The function definitions which were renamed as part of typo-correction /// to match their respective declarations. 
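  /// For example (hypothetical names, shown for illustration only):
  /// \code
  ///   void myFunction();
  ///   void myFunctoin() {}  // definition typo-corrected to 'myFunction'
  ///   void myFunction() {}  // must not be diagnosed as a redefinition
  /// \endcode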
  /// We want to keep track of them
  /// to ensure that we don't emit a "redefinition" error if we encounter a
  /// correctly named definition after the renamed definition.
  llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;

  /// Stack of types that correspond to the parameter entities that are
  /// currently being copy-initialized. Can be empty.
  llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;

  void ReadMethodPool(Selector Sel);
  void updateOutOfDateSelector(Selector Sel);

  /// Private Helper predicate to check for 'self'.
  bool isSelfExpr(Expr *RExpr);
  bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);

  /// Cause the active diagnostic on the DiagnosticsEngine to be
  /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
  /// should not be used elsewhere.
  void EmitCurrentDiagnostic(unsigned DiagID);

  /// Records and restores the FP_CONTRACT state on entry/exit of compound
  /// statements.
  class FPContractStateRAII {
  public:
    FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
    ~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }

  private:
    Sema& S;
    FPOptions OldFPFeaturesState;
  };

  void addImplicitTypedef(StringRef Name, QualType T);

  bool WarnedStackExhausted = false;

public:
  Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
       TranslationUnitKind TUKind = TU_Complete,
       CodeCompleteConsumer *CompletionConsumer = nullptr);
  ~Sema();

  /// Perform initialization that occurs after the parser has been
  /// initialized but before it parses anything.
  void Initialize();

  const LangOptions &getLangOpts() const { return LangOpts; }
  OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
  FPOptions &getFPOptions() { return FPFeatures; }

  DiagnosticsEngine &getDiagnostics() const { return Diags; }
  SourceManager &getSourceManager() const { return SourceMgr; }
  Preprocessor &getPreprocessor() const { return PP; }
  ASTContext &getASTContext() const { return Context; }
  ASTConsumer &getASTConsumer() const { return Consumer; }
  ASTMutationListener *getASTMutationListener() const;
  ExternalSemaSource* getExternalSource() const { return ExternalSource; }

  /// Registers an external source. If an external source already exists,
  /// creates a multiplex external source and appends to it.
  ///
  /// \param[in] E - A non-null external sema source.
  void addExternalSource(ExternalSemaSource *E);

  void PrintStats() const;

  /// Warn that the stack is nearly exhausted.
  void warnStackExhausted(SourceLocation Loc);

  /// Run some code with "sufficient" stack space. (Currently, at least 256K is
  /// guaranteed). Produces a warning if we're low on stack space and allocates
  /// more in that case. Use this in code that may recurse deeply (for example,
  /// in template instantiation) to avoid stack overflow.
  void runWithSufficientStackSpace(SourceLocation Loc,
                                   llvm::function_ref<void()> Fn);

  /// Helper class that creates diagnostics with optional
  /// template instantiation stacks.
  ///
  /// This class provides a wrapper around the basic DiagnosticBuilder
  /// class that emits diagnostics. SemaDiagnosticBuilder is
  /// responsible for emitting the diagnostic (as DiagnosticBuilder
  /// does) and, if the diagnostic comes from inside a template
  /// instantiation, printing the template instantiation stack as
  /// well.
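  /// Typical usage looks like the following (hypothetical diagnostic ID and
  /// arguments, shown for illustration only):
  /// \code
  ///   S.Diag(Loc, diag::err_some_problem) << SomeType << SomeRange;
  /// \endcode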
  class SemaDiagnosticBuilder : public DiagnosticBuilder {
    Sema &SemaRef;
    unsigned DiagID;

  public:
    SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef,
                          unsigned DiagID)
        : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }

    // This is a cunning lie. DiagnosticBuilder actually performs move
    // construction in its copy constructor (but due to varied uses, it's not
    // possible to conveniently express this as actual move construction). So
    // the default copy ctor here is fine, because the base class disables the
    // source anyway, making the user-defined ~SemaDiagnosticBuilder a safe
    // no-op in that case.
    SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;

    ~SemaDiagnosticBuilder() {
      // If we aren't active, there is nothing to do.
      if (!isActive()) return;

      // Otherwise, we need to emit the diagnostic. First flush the underlying
      // DiagnosticBuilder data, and clear the diagnostic builder itself so it
      // won't emit the diagnostic in its own destructor.
      //
      // This seems wasteful, in that as written the DiagnosticBuilder dtor
      // will do its own needless checks to see if the diagnostic needs to be
      // emitted. However, because we take care to ensure that the builder
      // objects never escape, a sufficiently smart compiler will be able to
      // eliminate that code.
      FlushCounts();
      Clear();

      // Dispatch to Sema to emit the diagnostic.
      SemaRef.EmitCurrentDiagnostic(DiagID);
    }

    /// Teach operator<< to produce an object of the correct type.
    template<typename T>
    friend const SemaDiagnosticBuilder &operator<<(
        const SemaDiagnosticBuilder &Diag, const T &Value) {
      const DiagnosticBuilder &BaseDiag = Diag;
      BaseDiag << Value;
      return Diag;
    }
  };

  /// Emit a diagnostic.
  SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
    DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
    return SemaDiagnosticBuilder(DB, *this, DiagID);
  }

  /// Emit a partial diagnostic.
  SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);

  /// Build a partial diagnostic.
  PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h

  bool findMacroSpelling(SourceLocation &loc, StringRef name);

  /// Get a string to suggest for zero-initialization of a type.
  std::string getFixItZeroInitializerForType(QualType T,
                                             SourceLocation Loc) const;
  std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;

  /// Calls \c Lexer::getLocForEndOfToken()
  SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);

  /// Retrieve the module loader associated with the preprocessor.
  ModuleLoader &getModuleLoader() const;

  void emitAndClearUnusedLocalTypedefWarnings();

  enum TUFragmentKind {
    /// The global module fragment, between 'module;' and a module-declaration.
    Global,

    /// A normal translation unit fragment. For a non-module unit, this is the
    /// entire translation unit. Otherwise, it runs from the module-declaration
    /// to the private-module-fragment (if any) or the end of the TU (if not).
    Normal,

    /// The private module fragment, between 'module :private;' and the end of
    /// the translation unit.
    Private
  };

  void ActOnStartOfTranslationUnit();
  void ActOnEndOfTranslationUnit();
  void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);

  void CheckDelegatingCtorCycles();

  Scope *getScopeForContext(DeclContext *Ctx);

  void PushFunctionScope();
  void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
  sema::LambdaScopeInfo *PushLambdaScope();

  /// This is used to inform Sema what the current TemplateParameterDepth
  /// is during Parsing.
Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K, unsigned OpenMPCaptureLevel = 0); /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short /// time after they've been popped. class PoppedFunctionScopeDeleter { Sema *Self; public: explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {} void operator()(sema::FunctionScopeInfo *Scope) const; }; using PoppedFunctionScopePtr = std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>; PoppedFunctionScopePtr PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, QualType BlockType = QualType()); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.empty() ? nullptr : FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const; void setFunctionHasBranchIntoScope(); void setFunctionHasBranchProtectedScope(); void setFunctionHasIndirectGoto(); void PushCompoundScope(bool IsStmtExpr); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// Get the innermost lambda enclosing the current location, if any. This /// looks through intervening non-lambda scopes such as local functions and /// blocks. sema::LambdaScopeInfo *getEnclosingLambda() const; /// Retrieve the current lambda scope info, if any. /// \param IgnoreNonLambdaCapturingScope true if should find the top-most /// lambda scope info ignoring all inner capturing scopes that are not /// lambda scopes. sema::LambdaScopeInfo * getCurLambda(bool IgnoreNonLambdaCapturingScope = false); /// Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. // QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace, SourceLocation AttrLoc); /// Same as above, but constructs the AddressSpace index if not provided. QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace, SourceLocation AttrLoc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// Build a function type. 
/// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); /// Package the given type and TSI into a ParsedType. 
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Expr *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. struct TypeDiagnoser { TypeDiagnoser() {} virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, std::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(), DiagID(DiagID), Args(Args...) 
    { assert(DiagID != 0 && "no diagnostic for type diagnoser"); }

    void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
      const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
      emit(DB, std::index_sequence_for<Ts...>());
      DB << T;
    }
  };

private:
  /// Methods for marking which expressions involve dereferencing a pointer
  /// marked with the 'noderef' attribute. Expressions are checked bottom up as
  /// they are parsed, meaning that a noderef pointer may not be accessed. For
  /// example, in `&*p` where `p` is a noderef pointer, we will first parse the
  /// `*p`, but need to check that `address of` is called on it. This requires
  /// keeping a container of all pending expressions and checking whether
  /// their addresses are eventually taken.
  void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
  void CheckAddressOfNoDeref(const Expr *E);
  void CheckMemberAccessOfNoDeref(const MemberExpr *E);

  bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
                               TypeDiagnoser *Diagnoser);

  struct ModuleScope {
    SourceLocation BeginLoc;
    clang::Module *Module = nullptr;
    bool ModuleInterface = false;
    bool ImplicitGlobalModuleFragment = false;
    VisibleModuleSet OuterVisibleModules;
  };
  /// The modules we're currently parsing.
  llvm::SmallVector<ModuleScope, 16> ModuleScopes;

  /// Namespace definitions that we will export when they finish.
  llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;

  /// Get the module whose scope we are currently within.
  Module *getCurrentModule() const {
    return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
  }

  VisibleModuleSet VisibleModules;

public:
  /// Get the module owning an entity.
  Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }

  /// Make a merged definition of an existing hidden definition \p ND
  /// visible at the specified location.
  void makeMergedDefinitionVisible(NamedDecl *ND);

  bool isModuleVisible(const Module *M, bool ModulePrivate = false);

  /// Determine whether a declaration is visible to name lookup.
  bool isVisible(const NamedDecl *D) {
    return !D->isHidden() || isVisibleSlow(D);
  }

  /// Determine whether any declaration of an entity is visible.
  bool hasVisibleDeclaration(const NamedDecl *D,
                             llvm::SmallVectorImpl<Module *> *Modules =
                                 nullptr) {
    return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
  }
  bool hasVisibleDeclarationSlow(const NamedDecl *D,
                                 llvm::SmallVectorImpl<Module *> *Modules);

  bool hasVisibleMergedDefinition(NamedDecl *Def);
  bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);

  /// Determine if \p D and \p Suggested have a structurally compatible
  /// layout as described in C11 6.2.7/1.
  bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);

  /// Determine if \p D has a visible definition. If not, suggest a declaration
  /// that should be made visible to expose the definition.
  bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
                            bool OnlyNeedComplete = false);
  bool hasVisibleDefinition(const NamedDecl *D) {
    NamedDecl *Hidden;
    return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
  }

  /// Determine if the template parameter \p D has a visible default argument.
  bool hasVisibleDefaultArgument(const NamedDecl *D,
                                 llvm::SmallVectorImpl<Module *> *Modules =
                                     nullptr);

  /// Determine if there is a visible declaration of \p D that is an explicit
  /// specialization declaration for a specialization of a template. (For a
  /// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration). bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool isUsualDeallocationFunction(const CXXMethodDecl *FD); bool isCompleteType(SourceLocation Loc, QualType T) { return !RequireCompleteTypeImpl(Loc, T, nullptr); } bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T, TagDecl *OwnedTagDecl = nullptr); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. 
// struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). enum NameClassificationKind { NC_Unknown, NC_Error, NC_Keyword, NC_Type, NC_Expression, NC_NestedNameSpecifier, NC_TypeTemplate, NC_VarTemplate, NC_FunctionTemplate, NC_UndeclaredTemplate, }; class NameClassification { NameClassificationKind Kind; ExprResult Expr; TemplateName Template; ParsedType Type; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {} NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {} static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification NestedNameSpecifier() { return NameClassification(NC_NestedNameSpecifier); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } static NameClassification UndeclaredTemplate(TemplateName Name) { NameClassification Result(NC_UndeclaredTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } ExprResult getExpression() const { assert(Kind == NC_Expression); return Expr; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; case 
NC_UndeclaredTemplate: return TNK_Undeclared_template; default: llvm_unreachable("unsupported name classification."); } } }; /// Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param IsAddressOfOperand True if this name is the operand of a unary /// address of ('&') expression, assuming it is classified as an /// expression. /// /// \param CCC The correction callback, if typo correction is desired. NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, Concept, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. 
return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); enum class CheckConstexprKind { /// Diagnose issues that are non-constant or that are extensions. 
    Diagnose,
    /// Identify whether this function satisfies the formal rules for constexpr
    /// functions in the current language mode (with no extensions).
    CheckValid
  };

  bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
                                        CheckConstexprKind Kind);

  void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
  void FindHiddenVirtualMethods(CXXMethodDecl *MD,
                          SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
  void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
                          SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
  // Returns true if the function declaration is a redeclaration
  bool CheckFunctionDeclaration(Scope *S,
                                FunctionDecl *NewFD, LookupResult &Previous,
                                bool IsMemberSpecialization);
  bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
  bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
                                      QualType NewT, QualType OldT);
  void CheckMain(FunctionDecl *FD, const DeclSpec &D);
  void CheckMSVCRTEntryPoint(FunctionDecl *FD);
  Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
                                                   bool IsDefinition);
  void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
  Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
  ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
                                          SourceLocation Loc,
                                          QualType T);
  ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
                              SourceLocation NameLoc, IdentifierInfo *Name,
                              QualType T, TypeSourceInfo *TSInfo,
                              StorageClass SC);
  void ActOnParamDefaultArgument(Decl *param,
                                 SourceLocation EqualLoc,
                                 Expr *defarg);
  void ActOnParamUnparsedDefaultArgument(Decl *param,
                                         SourceLocation EqualLoc,
                                         SourceLocation ArgLoc);
  void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
  bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
                               SourceLocation EqualLoc);

  void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
  void ActOnUninitializedDecl(Decl *dcl);
  void ActOnInitializerError(Decl *Dcl);

  void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
  void ActOnCXXForRangeDecl(Decl *D);
  StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
                                        IdentifierInfo *Ident,
                                        ParsedAttributes &Attrs,
                                        SourceLocation AttrEnd);
  void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
  void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
  void CheckStaticLocalForDllExport(VarDecl *VD);
  void FinalizeDeclaration(Decl *D);
  DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
                                         ArrayRef<Decl *> Group);
  DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);

  /// Should be called on all declarations that might have attached
  /// documentation comments.
  void ActOnDocumentableDecl(Decl *D);
  void ActOnDocumentableDecls(ArrayRef<Decl *> Group);

  void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
                                       SourceLocation LocAfterDecls);
  void CheckForFunctionRedefinition(
      FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
      SkipBodyInfo *SkipBody = nullptr);
  Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
                                MultiTemplateParamsArg TemplateParamLists,
                                SkipBodyInfo *SkipBody = nullptr);
  Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
                                SkipBodyInfo *SkipBody = nullptr);
  void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
  bool isObjCMethodDecl(Decl *D) {
    return D && isa<ObjCMethodDecl>(D);
  }

  /// Determine whether we can delay parsing the body of a function or
  /// function template until it is used, assuming we don't care about
  /// emitting code for that function.
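  /// For example (illustrative):
  /// \code
  ///   constexpr int k() { return 1; } // body may be needed while parsing
  ///   auto h() { return 0; }          // C++14: return type needs the body
  /// \endcode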
/// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList, SourceLocation SemiLoc); enum class ModuleDeclKind { Interface, ///< 'export module X;' Implementation, ///< 'module X;' }; /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path, bool IsFirstDecl); /// The parser has processed a global-module-fragment declaration that begins /// the definition of the global module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc); /// The parser has processed a private-module-fragment declaration that begins /// the definition of the private module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. /// \param PrivateLoc The location of the 'private' keyword. DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc, SourceLocation PrivateLoc); /// The parser has processed a module import declaration. /// /// \param StartLoc The location of the first token in the declaration. This /// could be the location of an '@', 'export', or 'import'. /// \param ExportLoc The location of the 'export' keyword, if any. /// \param ImportLoc The location of the 'import' keyword. /// \param Path The module access path. 
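  /// For reference, the source forms this handles include (illustrative):
  /// \code
  ///   import some.module;         // module import
  ///   export import some.module;  // re-exported from a module interface
  /// \endcode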
  DeclResult ActOnModuleImport(SourceLocation StartLoc,
                               SourceLocation ExportLoc,
                               SourceLocation ImportLoc, ModuleIdPath Path);
  DeclResult ActOnModuleImport(SourceLocation StartLoc,
                               SourceLocation ExportLoc,
                               SourceLocation ImportLoc, Module *M,
                               ModuleIdPath Path = {});

  /// The parser has processed a module import translated from a
  /// #include or similar preprocessing directive.
  void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
  void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);

  /// The parser has entered a submodule.
  void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);

  /// The parser has left a submodule.
  void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);

  /// Create an implicit import of the given module at the given
  /// source location, for error recovery, if possible.
  ///
  /// This routine is typically used when an entity found by name lookup
  /// is actually hidden within a module that we know about but the user
  /// has forgotten to import.
  void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
                                                  Module *Mod);

  /// Kinds of missing import. Note, the values of these enumerators
  /// correspond to %select values in diagnostics.
  enum class MissingImportKind {
    Declaration,
    Definition,
    DefaultArgument,
    ExplicitSpecialization,
    PartialSpecialization
  };

  /// Diagnose that the specified declaration needs to be visible but
  /// isn't, and suggest a module import that would resolve the problem.
  void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                             MissingImportKind MIK, bool Recover = true);
  void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                             SourceLocation DeclLoc,
                             ArrayRef<Module *> Modules,
                             MissingImportKind MIK, bool Recover);

  Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
                             SourceLocation LBraceLoc);
  Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
                              SourceLocation RBraceLoc);

  /// We've found a use of a templated declaration that would trigger an
  /// implicit instantiation. Check that any relevant explicit specializations
  /// and partial specializations are visible, and diagnose if not.
  void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);

  /// We've found a use of a template specialization that would select a
  /// partial specialization. Check that the partial specialization is
  /// visible, and diagnose if not.
  void checkPartialSpecializationVisibility(SourceLocation Loc,
                                            NamedDecl *Spec);

  /// Retrieve a suitable printing policy for diagnostics.
  PrintingPolicy getPrintingPolicy() const {
    return getPrintingPolicy(Context, PP);
  }

  /// Retrieve a suitable printing policy for diagnostics.
  static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
                                          const Preprocessor &PP);

  /// Scope actions.
  void ActOnPopScope(SourceLocation Loc, Scope *S);
  void ActOnTranslationUnitScope(Scope *S);

  Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
                                   RecordDecl *&AnonRecord);
  Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
                                   MultiTemplateParamsArg TemplateParams,
                                   bool IsExplicitInstantiation,
                                   RecordDecl *&AnonRecord);

  Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS,
                                    RecordDecl *Record,
                                    const PrintingPolicy &Policy);

  Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
                                       RecordDecl *Record);

  /// Common ways to introduce type names without a tag for use in
  /// diagnostics. Keep in sync with err_tag_reference_non_tag.
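  /// e.g. (illustrative):
  /// \code
  ///   typedef int T;
  ///   struct T *p; // error: typedef 'T' cannot be referenced with 'struct'
  /// \endcode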
enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD); void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. 
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
                 ArrayRef<Decl *> Fields, SourceLocation LBrac,
                 SourceLocation RBrac, const ParsedAttributesView &AttrList);

/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);

/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Unlike in C++, we actually parse the body and reject or error out
/// in case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
                              SkipBodyInfo &SkipBody);

typedef void *SkippedDefinitionContext;

/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);

Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);

/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
                                     SourceLocation FinalLoc,
                                     bool IsFinalSpelledSealed,
                                     SourceLocation LBraceLoc);

/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
                              SourceRange BraceRange);

void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);

void ActOnObjCContainerFinishDefinition();

/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);

/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);

EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
                                    EnumConstantDecl *LastEnumConst,
                                    SourceLocation IdLoc, IdentifierInfo *Id,
                                    Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
                            QualType EnumUnderlyingTy, bool IsFixed,
                            const EnumDecl *Prev);

/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
                                    SourceLocation IILoc);

Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
                        SourceLocation IdLoc, IdentifierInfo *Id,
                        const ParsedAttributesView &Attrs,
                        SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
                   Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
                   const ParsedAttributesView &Attr);

DeclContext *getContainingDC(DeclContext *DC);

/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();

/// EnterDeclaratorContext - Used when we must look up names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);

/// Push the parameters of D, which must be a function, into scope.
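///
/// A minimal sketch of the expected pairing at a call site (illustrative;
/// real callers live in the parser's late-parsing paths):
/// \code
///   Actions.ActOnReenterFunctionContext(getCurScope(), FD);
///   // ... parse the delayed body or default argument using FD's params ...
///   Actions.ActOnExitFunctionContext();
/// \endcode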
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();

DeclContext *getFunctionLevelDeclContext();

/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();

/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();

/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();

/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);

/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
///        enclosing namespace set of the context, rather than contained
///        directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
                   bool AllowInlineNamespace = false);

/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);

/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
                              TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);

/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
  /// Don't merge availability attributes at all.
  AMK_None,

  /// Merge availability attributes for a redeclaration, which requires
  /// an exact match.
  AMK_Redeclaration,

  /// Merge availability attributes for an override, which requires
  /// an exact match or a weakening of constraints.
  AMK_Override,

  /// Merge availability attributes for an implementation of
  /// a protocol requirement.
  AMK_ProtocolImplementation,
};

/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
  /// The availability attribute was specified explicitly next to the
  /// declaration.
  AP_Explicit = 0,

  /// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1, /// The availability attribute for a specific platform was inferred from /// an availability attribute for another platform. AP_InferredFromOtherPlatform = 2 }; /// Attribute merging methods. Return true if a new attribute was added. AvailabilityAttr *mergeAvailabilityAttr( NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority, unsigned AttrSpellingListIndex); TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range, TypeVisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range, VisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex, StringRef Uuid); DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); MSInheritanceAttr * mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase, unsigned AttrSpellingListIndex, MSInheritanceAttr::Spelling SemanticSpelling); FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range, IdentifierInfo *Format, int FormatIdx, int FirstArg, unsigned AttrSpellingListIndex); SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); CodeSegAttr *mergeCodeSegAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident, unsigned AttrSpellingListIndex); MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); NoSpeculativeLoadHardeningAttr * mergeNoSpeculativeLoadHardeningAttr(Decl *D, const NoSpeculativeLoadHardeningAttr &AL); SpeculativeLoadHardeningAttr * mergeSpeculativeLoadHardeningAttr(Decl *D, const SpeculativeLoadHardeningAttr &AL); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { 
AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true); ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformAggregateInitializationForOverloadResolution( const InitializedEntity &Entity, InitListExpr *From); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. 
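///
/// For instance (standard C++ semantics; a minimal illustration):
/// \code
///   struct A { int x; };
///   const A &R = A();      // temporary's lifetime extended to match 'R'
///   const int &M = A().x;  // extension also applies through member access
/// \endcode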
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator. CCEK_ConstexprIf, ///< Condition in a constexpr if statement. CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. 
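///
/// For example, a switch condition undergoes this kind of conversion, so a
/// converter that matches integral or enumeration types would accept
/// (illustrative):
/// \code
///   struct E { operator int() const; };
///   void f(E V) { switch (V) { default: break; } } // uses E::operator int
/// \endcode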
ExprResult PerformContextualImplicitConversion(
    SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);

enum ObjCSubscriptKind {
  OS_Array,
  OS_Dictionary,
  OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);

// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
  LK_Array,
  LK_Dictionary,
  LK_Numeric,
  LK_Boxed,
  LK_String,
  LK_Block,
  LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);

ExprResult PerformObjectMemberConversion(Expr *From,
                                         NestedNameSpecifier *Qualifier,
                                         NamedDecl *FoundDecl,
                                         NamedDecl *Member);

// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;

using ADLCallKind = CallExpr::ADLCallKind;

void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
                          ArrayRef<Expr *> Args,
                          OverloadCandidateSet &CandidateSet,
                          bool SuppressUserConversions = false,
                          bool PartialOverloading = false,
                          bool AllowExplicit = true,
                          bool AllowExplicitConversion = false,
                          ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
                          ConversionSequenceList EarlyConversions = None);
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
                           ArrayRef<Expr *> Args,
                           OverloadCandidateSet &CandidateSet,
                           TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
                           bool SuppressUserConversions = false,
                           bool PartialOverloading = false,
                           bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType,
                        Expr::Classification ObjectClassification,
                        ArrayRef<Expr *> Args,
                        OverloadCandidateSet& CandidateSet,
                        bool SuppressUserConversion = false);
void AddMethodCandidate(CXXMethodDecl *Method,
                        DeclAccessPair FoundDecl,
                        CXXRecordDecl *ActingContext, QualType ObjectType,
                        Expr::Classification ObjectClassification,
                        ArrayRef<Expr *> Args,
                        OverloadCandidateSet& CandidateSet,
                        bool SuppressUserConversions = false,
                        bool PartialOverloading = false,
                        ConversionSequenceList EarlyConversions = None);
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
                                DeclAccessPair FoundDecl,
                                CXXRecordDecl *ActingContext,
                                TemplateArgumentListInfo *ExplicitTemplateArgs,
                                QualType ObjectType,
                                Expr::Classification ObjectClassification,
                                ArrayRef<Expr *> Args,
                                OverloadCandidateSet& CandidateSet,
                                bool SuppressUserConversions = false,
                                bool PartialOverloading = false);
void AddTemplateOverloadCandidate(
    FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
    TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
    OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
    bool PartialOverloading = false, bool AllowExplicit = true,
    ADLCallKind IsADLCandidate = ADLCallKind::NotADL);
bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate,
                                  ArrayRef<QualType> ParamTypes,
                                  ArrayRef<Expr *> Args,
                                  OverloadCandidateSet &CandidateSet,
                                  ConversionSequenceList &Conversions,
                                  bool SuppressUserConversions,
                                  CXXRecordDecl *ActingContext = nullptr,
                                  QualType ObjectType = QualType(),
                                  Expr::Classification
                                      ObjectClassification = {});
void AddConversionCandidate(
    CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
    CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
    OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
    bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
    FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
    CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
    OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
    bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
                           DeclAccessPair FoundDecl,
                           CXXRecordDecl *ActingContext,
                           const FunctionProtoType *Proto,
                           Expr *Object, ArrayRef<Expr *> Args,
                           OverloadCandidateSet& CandidateSet);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
                                 SourceLocation OpLoc, ArrayRef<Expr *> Args,
                                 OverloadCandidateSet& CandidateSet,
                                 SourceRange OpRange = SourceRange());
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
                         OverloadCandidateSet& CandidateSet,
                         bool IsAssignmentOperator = false,
                         unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
                                  SourceLocation OpLoc, ArrayRef<Expr *> Args,
                                  OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
                                          SourceLocation Loc,
                                          ArrayRef<Expr *> Args,
                                TemplateArgumentListInfo *ExplicitTemplateArgs,
                                          OverloadCandidateSet& CandidateSet,
                                          bool PartialOverloading = false);

// Emit as a 'note' the specific overload candidate.
void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn,
                           QualType DestType = QualType(),
                           bool TakingAddress = false);

// Emit as a series of 'note's all templates and non-templates identified by
// the given expression E.
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
                               bool TakingAddress = false);

/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
                            bool MissingImplicitThis = false);

/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);

/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
                                         const Expr *ThisArg,
                                         ArrayRef<const Expr *> Args,
                                         SourceLocation Loc);

/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
                                           SourceLocation Loc);

/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
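///
/// For instance, with Clang's enable_if extension (diagnostic wording
/// approximate):
/// \code
///   void G(int N) __attribute__((enable_if(N > 0, "N must be positive")));
///   void (*P)(int) = G; // error: G has a non-tautological enable_if
/// \endcode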
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
                                       bool Complain = false,
                                       SourceLocation Loc = SourceLocation());

// [PossiblyAFunctionType]  -->   [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);

FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
                                   QualType TargetType,
                                   bool Complain,
                                   DeclAccessPair &Found,
                                   bool *pHadMultipleCandidates = nullptr);

FunctionDecl *
resolveAddressOfOnlyViableOverloadCandidate(Expr *E,
                                            DeclAccessPair &FoundResult);

bool resolveAndFixAddressOfOnlyViableOverloadCandidate(
    ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);

FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
                                            bool Complain = false,
                                            DeclAccessPair *Found = nullptr);

bool ResolveAndFixSingleFunctionTemplateSpecialization(
    ExprResult &SrcExpr, bool DoFunctionPointerConversion = false,
    bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(),
    QualType DestTypeForComplaining = QualType(),
    unsigned DiagIDForComplaining = 0);

Expr *FixOverloadedFunctionReference(Expr *E,
                                     DeclAccessPair FoundDecl,
                                     FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
                                          DeclAccessPair FoundDecl,
                                          FunctionDecl *Fn);

void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
                                 ArrayRef<Expr *> Args,
                                 OverloadCandidateSet &CandidateSet,
                                 bool PartialOverloading = false);

// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
  FRS_Success,
  FRS_NoViableFunction,
  FRS_DiagnosticIssued
};

ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
                                         SourceLocation RangeLoc,
                                         const DeclarationNameInfo &NameInfo,
                                         LookupResult &MemberLookup,
                                         OverloadCandidateSet *CandidateSet,
                                         Expr *Range, ExprResult *CallExpr);

ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
                                   UnresolvedLookupExpr *ULE,
                                   SourceLocation LParenLoc,
                                   MultiExprArg Args,
                                   SourceLocation RParenLoc,
                                   Expr *ExecConfig,
                                   bool AllowTypoCorrection=true,
                                   bool CalleesAddressIsTaken=false);

bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
                            MultiExprArg Args, SourceLocation RParenLoc,
                            OverloadCandidateSet *CandidateSet,
                            ExprResult *Result);

ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
                                   UnaryOperatorKind Opc,
                                   const UnresolvedSetImpl &Fns,
                                   Expr *input, bool RequiresADL = true);

ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
                                 BinaryOperatorKind Opc,
                                 const UnresolvedSetImpl &Fns,
                                 Expr *LHS, Expr *RHS,
                                 bool RequiresADL = true);

ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
                                              SourceLocation RLoc,
                                              Expr *Base, Expr *Idx);

ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
                                     SourceLocation LParenLoc,
                                     MultiExprArg Args,
                                     SourceLocation RParenLoc);
ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object,
                                        SourceLocation LParenLoc,
                                        MultiExprArg Args,
                                        SourceLocation RParenLoc);

ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
                                    SourceLocation OpLoc,
                                    bool *NoArrowOperatorFound = nullptr);

/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
                         CallExpr *CE, FunctionDecl *FD);

/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
                              bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);

/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria are specified via the LookupNameKind enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{

/// Describes the kind of name lookup to perform.
enum LookupNameKind {
  /// Ordinary name lookup, which finds ordinary names (functions,
  /// variables, typedefs, etc.) in C and most kinds of names
  /// (functions, variables, members, types, etc.) in C++.
  LookupOrdinaryName = 0,

  /// Tag name lookup, which finds the names of enums, classes,
  /// structs, and unions.
  LookupTagName,

  /// Label name lookup.
  LookupLabel,

  /// Member name lookup, which finds the names of
  /// class/struct/union members.
  LookupMemberName,

  /// Look up of an operator name (e.g., operator+) for use with
  /// operator overloading. This lookup is similar to ordinary name
  /// lookup, but will ignore any declarations that are class members.
  LookupOperatorName,

  /// Look up of a name that precedes the '::' scope resolution
  /// operator in C++. This lookup completely ignores operator, object,
  /// function, and enumerator names (C++ [basic.lookup.qual]p1).
  LookupNestedNameSpecifierName,

  /// Look up a namespace name within a C++ using directive or
  /// namespace alias definition, ignoring non-namespace names (C++
  /// [basic.lookup.udir]p1).
  LookupNamespaceName,

  /// Look up all declarations in a scope with the given name,
  /// including resolved using declarations. This is appropriate
  /// for checking redeclarations for a using declaration.
  LookupUsingDeclName,

  /// Look up an ordinary name that is going to be redeclared as a
  /// name with linkage. This lookup ignores any declarations that
  /// are outside of the current scope unless they have linkage. See
  /// C99 6.2.2p4-5 and C++ [basic.link]p6.
  LookupRedeclarationWithLinkage,

  /// Look up a friend of a local class. This lookup does not look
  /// outside the innermost non-class scope. See C++11 [class.friend]p11.
  LookupLocalFriendName,

  /// Look up the name of an Objective-C protocol.
  LookupObjCProtocolName,

  /// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. LOLR_StringTemplate }; SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState &&other) noexcept; TypoExprState &operator=(TypoExprState &&other) noexcept; }; /// The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC); // The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. 
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;

/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;

/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
                           Sema::LookupNameKind LookupKind, Scope *S,
                           CXXScopeSpec *SS,
                           CorrectionCandidateCallback &CCC,
                           DeclContext *MemberContext, bool EnteringContext,
                           const ObjCObjectPointerType *OPT,
                           bool ErrorRecovery);

public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;

/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);

/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
                            SourceLocation Loc,
                            LookupNameKind NameKind,
                            RedeclarationKind Redecl
                              = NotForRedeclaration);
bool LookupName(LookupResult &R, Scope *S,
                bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                         bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                         CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
                      bool AllowBuiltinCreation = false,
                      bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
                                 RedeclarationKind Redecl
                                   = NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);

void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
                                  QualType T1, QualType T2,
                                  UnresolvedSetImpl &Functions);

LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
                               SourceLocation GnuLabelLoc = SourceLocation());

DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
                                             unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                       bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
                                            unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                      bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);

bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
                                                  ArrayRef<QualType> ArgTys,
                                                  bool AllowRaw,
                                                  bool AllowTemplate,
                                                  bool AllowStringTemplate,
                                                  bool DiagnoseMissing);
bool isKnownName(StringRef name);

void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
                             ArrayRef<Expr *> Args, ADLResult &Functions);

void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
                        VisibleDeclConsumer &Consumer,
                        bool IncludeGlobalScope = true,
                        bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
                        VisibleDeclConsumer &Consumer,
                        bool IncludeGlobalScope = true,
                        bool IncludeDependentBases = false,
                        bool LoadExternal = true);

enum CorrectTypoKind {
  CTK_NonError,     // CorrectTypo used in a non-error-recovery situation.
  CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
}; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(ER, nullptr, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. 
void ProcessDeclAttributeDelayed(Decl *D,
                                 const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
                              bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
                                    const ParsedAttributesView &AttrList);

void checkUnusedDeclAttributes(Declarator &D);

/// Determine if type T is a valid subject for nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);

bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
                          const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
                                    StringRef &Str,
                                    SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
    CXXRecordDecl *RD, SourceRange Range, bool BestCase,
    MSInheritanceAttr::Spelling SemanticSpelling);

void CheckAlignasUnderalignment(Decl *D);

/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
                            SourceLocation Loc);

// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);

/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;

/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
                                 const ParsedAttributesView &Attrs,
                                 SourceRange Range);

void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
                                 ObjCMethodDecl *MethodDecl,
                                 bool IsProtocolMethodDecl);

void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
                                      ObjCMethodDecl *Overridden,
                                      bool IsProtocolMethodDecl);

/// WarnExactTypedMethods - This routine issues a warning if a method's
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
                           ObjCMethodDecl *MethodDecl,
                           bool IsProtocolMethodDecl);

typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;

/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
                              ObjCIvarDecl **Fields, unsigned nIvars,
                              SourceLocation Loc);

/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
                               ObjCContainerDecl* IDecl,
                               bool IncompleteImpl = false);

/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
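///
/// For example (Objective-C; protocol properties are not auto-synthesized,
/// diagnostic wording approximate):
/// \code
///   @protocol P
///   @property (nonatomic) int Val;
///   @end
///   @interface I <P>
///   @end
///   @implementation I
///   @end // warning: property 'Val' requires method 'Val' to be defined
/// \endcode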
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
                                     ObjCContainerDecl *CDecl,
                                     bool SynthesizeProperties);

/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);

/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
                                 ObjCInterfaceDecl *IDecl,
                                 SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);

/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
                                    ObjCMethodDecl *Method, ObjCIvarDecl *IV);

/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
/// ivar which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
                                         const ObjCImplementationDecl *ImplD);

/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns this ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
                                             const ObjCPropertyDecl *&PDecl) const;

/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
                                                 SourceLocation AtLoc,
                                                 SourceLocation LParenLoc,
                                                 FieldDeclarator &FD,
                                                 Selector GetterSel,
                                                 SourceLocation GetterNameLoc,
                                                 Selector SetterSel,
                                                 SourceLocation SetterNameLoc,
                                                 const bool isReadWrite,
                                                 unsigned &Attributes,
                                                 const unsigned AttributesAsWritten,
                                                 QualType T,
                                                 TypeSourceInfo *TSI,
                                                 tok::ObjCKeywordKind MethodImplKind);

/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
                                     ObjCContainerDecl *CDecl,
                                     SourceLocation AtLoc,
                                     SourceLocation LParenLoc,
                                     FieldDeclarator &FD,
                                     Selector GetterSel,
                                     SourceLocation GetterNameLoc,
                                     Selector SetterSel,
                                     SourceLocation SetterNameLoc,
                                     const bool isReadWrite,
                                     const unsigned Attributes,
                                     const unsigned AttributesAsWritten,
                                     QualType T,
                                     TypeSourceInfo *TSI,
                                     tok::ObjCKeywordKind MethodImplKind,
                                     DeclContext *lexicalDC = nullptr);

/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when an atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
                                     ObjCInterfaceDecl* IDecl);

void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);

void DiagnoseMissingDesignatedInitOverrides(
                                        const ObjCImplementationDecl *ImplD,
                                        const ObjCInterfaceDecl *IFD);

void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);

enum MethodMatchStrategy {
  MMS_loose,
  MMS_strict
};

/// MatchTwoMethodDeclarations - Checks if two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
                                const ObjCMethodDecl *PrevMethod,
                                MethodMatchStrategy strategy = MMS_strict);

/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
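///
/// For instance, a parameter type mismatch between the interface and its
/// implementation is diagnosed along this path (illustrative):
/// \code
///   @interface I
///   - (void)setVal:(int)V;
///   @end
///   @implementation I
///   - (void)setVal:(float)V {} // warning: conflicting parameter types
///   @end
/// \endcode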
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
                                const SelectorSet &ClsMap,
                                SelectorSet &InsMapSeen,
                                SelectorSet &ClsMapSeen,
                                ObjCImplDecl* IMPDecl,
                                ObjCContainerDecl* IDecl,
                                bool &IncompleteImpl,
                                bool ImmediateClass,
                                bool WarnCategoryMethodImpl=false);

/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a
/// category match those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);

/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
                                         bool receiverIdOrClass,
                                         bool instance);

public:
/// - Returns instance or factory methods in the global method pool for the
/// given selector. It checks the desired kind first; if none is found and
/// parameter checkTheOther is set, it then checks the other kind. If no such
/// method or only one method is found, function returns false; otherwise, it
/// returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
                                   SmallVectorImpl<ObjCMethodDecl*>& Methods,
                                   bool InstanceFirst, bool CheckTheOther,
                                   const ObjCObjectType *TypeBound = nullptr);

bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
                               SourceRange R, bool receiverIdOrClass,
                               SmallVectorImpl<ObjCMethodDecl*>& Methods);

void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
                                   Selector Sel, SourceRange R,
                                   bool receiverIdOrClass);

private:
/// - Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
                                 bool IsInstance,
                                 SmallVectorImpl<ObjCMethodDecl*>& Methods);

/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                bool RecordFailure = true) {
  if (RecordFailure)
    TypoCorrectionFailures[Typo].insert(TypoLoc);
  return TypoCorrection();
}

public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/true);
}

/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/false);
}

/// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the
/// global pool.
void AddAnyMethodToGlobalPool(Decl *D);

/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
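///
/// The "multiple signatures" case typically arises for messages sent to
/// 'id' (illustrative; the two declarations live in unrelated classes):
/// \code
///   - (int)size;     // declared in one class
///   - (double)size;  // declared in another class
///   // [(id)Obj size] may warn: multiple methods named 'size' found
/// \endcode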
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                 bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/true);
}

/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/false);
}

const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
                          QualType ObjectType=QualType());

/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
                                       SmallVectorImpl<ObjCIvarDecl*> &Ivars);

//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
  FullExprArg() : E(nullptr) { }
  FullExprArg(Sema &actions) : E(nullptr) { }

  ExprResult release() {
    return E;
  }

  Expr *get() const { return E; }

  Expr *operator->() {
    return E;
  }

private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;

  explicit FullExprArg(Expr *expr) : E(expr) {}

  Expr *E;
};

FullExprArg MakeFullExpr(Expr *Arg) {
  return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  return FullExprArg(
      ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  ExprResult FE =
      ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                          /*DiscardedValue*/ true);
  return FullExprArg(FE.get());
}

StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();

StmtResult ActOnNullStmt(SourceLocation SemiLoc,
                         bool HasLeadingEmptyMacro = false);

void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                             ArrayRef<Stmt *> Elts, bool isStmtExpr);

/// An RAII object to enter the scope of a compound statement.
class CompoundScopeRAII {
public:
  CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
    S.ActOnStartOfCompoundStmt(IsStmtExpr);
  }

  ~CompoundScopeRAII() {
    S.ActOnFinishOfCompoundStmt();
  }

private:
  Sema &S;
};

/// An RAII helper that pops a function scope on exit.
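///
/// A minimal usage sketch (illustrative):
/// \code
///   FunctionScopeRAII FuncScope(SemaRef); // pops the scope on early returns
///   if (Success)
///     FuncScope.disable(); // on success, leave the scope for the caller
/// \endcode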
struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS, SourceLocation DotDotDotLoc, ExprResult RHS, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); class ConditionResult; StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Stmt *InitStmt, ConditionResult Cond); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. 
BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params, unsigned OpenMPCaptureLevel = 0); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables = 4, CES_FormerDefault = (CES_AllowParameters), CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes), CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, 
Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. 
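  ///
  /// Illustrative user code that can trigger this warning (the snippet is an
  /// assumption about a typical case, not part of this header):
  /// \code
  ///   void take(int *);
  ///   take(0); // literal 0 converts to a null pointer; 'nullptr' preferred
  /// \endcode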
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass, bool ObjCPropertyAccess, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReceiver = nullptr); bool makeUnavailableInSystemHeader(SourceLocation loc, UnavailableAttr::ImplicitReason reason); /// Issue any -Wunguarded-availability warnings in \c FD void DiagnoseUnguardedAvailabilityViolations(Decl *FD); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReciever = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier). 
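  //
  // Illustrative sketch of the virtual-function case above (names are made
  // up; per C++ [basic.def.odr], a pure virtual function named without an
  // explicit nested-name-specifier is not odr-used):
  //
  //   struct B { virtual void f() = 0; };
  //   void g(B &b) { b.f(); }    // unqualified: does not odr-use B::f
  //   void h(B &b) { b.B::f(); } // qualified: odr-uses B::f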
  void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
  void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
                              bool MightBeOdrUse = true);
  void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
  void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
  void MarkMemberReferenced(MemberExpr *E);
  void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
  void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
                                         unsigned CapturingScopeIndex);

  ExprResult CheckLValueToRValueConversionOperand(Expr *E);
  void CleanupVarDeclMarking();

  enum TryCaptureKind {
    TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
  };

  /// Try to capture the given variable.
  ///
  /// \param Var The variable to capture.
  ///
  /// \param Loc The location at which the capture occurs.
  ///
  /// \param Kind The kind of capture, which may be implicit (for either a
  /// block or a lambda), or explicit by-value or by-reference (for a lambda).
  ///
  /// \param EllipsisLoc The location of the ellipsis, if one is provided in
  /// an explicit lambda capture.
  ///
  /// \param BuildAndDiagnose Whether we are actually supposed to add the
  /// captures or diagnose errors. If false, this routine merely checks
  /// whether the capture can occur, without performing the capture itself or
  /// complaining if the variable cannot be captured.
  ///
  /// \param CaptureType Will be set to the type of the field used to capture
  /// this variable in the innermost block or lambda. Only valid when the
  /// variable can be captured.
  ///
  /// \param DeclRefType Will be set to the type of a reference to the capture
  /// from within the current scope. Only valid when the variable can be
  /// captured.
  ///
  /// \param FunctionScopeIndexToStopAt If non-null, it points to the index
  /// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
  /// This is useful when enclosing lambdas must speculatively capture
  /// variables that may or may not be used in certain specializations of
  /// a nested generic lambda.
  ///
  /// \returns true if an error occurred (i.e., the variable cannot be
  /// captured) and false if the capture succeeded.
  bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
                          SourceLocation EllipsisLoc, bool BuildAndDiagnose,
                          QualType &CaptureType,
                          QualType &DeclRefType,
                          const unsigned *const FunctionScopeIndexToStopAt);

  /// Try to capture the given variable.
  bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
                          TryCaptureKind Kind = TryCapture_Implicit,
                          SourceLocation EllipsisLoc = SourceLocation());

  /// Checks if the variable must be captured.
  bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);

  /// Given a variable, determine the type that a reference to that
  /// variable will have in the given scope.
  QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);

  /// Mark all of the declarations referenced within a particular AST node as
  /// referenced. Used when template instantiation instantiates a non-dependent
  /// type -- entities referenced by the type are now referenced.
  void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
  void MarkDeclarationsReferencedInExpr(Expr *E,
                                        bool SkipLocalVariables = false);

  /// Try to recover by turning the given expression into a
  /// call. Returns true if recovery was attempted or an error was
  /// emitted; this may also leave the ExprResult invalid.
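  ///
  /// A minimal call sketch (hedged: the diagnostic chosen here is purely
  /// illustrative):
  /// \code
  ///   ExprResult Fixed = E;
  ///   if (tryToRecoverWithCall(Fixed,
  ///                            PDiag(diag::err_typecheck_call_not_function),
  ///                            /*ForceComplain=*/true))
  ///     return Fixed; // recovery attempted or error emitted; may be invalid
  /// \endcode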
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None. 
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). 
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
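  //
  // Illustrative user code for that retry (names are made up for the sketch):
  //
  //   struct Widget { void draw(); };
  //   struct SmartPtr { Widget *operator->(); };
  //   SmartPtr p;
  //   p.draw(); // no member 'draw' in SmartPtr; retried as p->draw()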
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. 
  ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                           MultiExprArg ArgExprs, SourceLocation RParenLoc,
                           Expr *ExecConfig = nullptr);
  ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                           MultiExprArg ArgExprs, SourceLocation RParenLoc,
                           Expr *ExecConfig = nullptr,
                           bool IsExecConfig = false);
  ExprResult
  BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
                        ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
                        Expr *Config = nullptr, bool IsExecConfig = false,
                        ADLCallKind UsesADL = ADLCallKind::NotADL);

  ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
                                     MultiExprArg ExecConfig,
                                     SourceLocation GGGLoc);

  ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
                           Declarator &D, ParsedType &Ty,
                           SourceLocation RParenLoc, Expr *CastExpr);
  ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
                                 TypeSourceInfo *Ty,
                                 SourceLocation RParenLoc,
                                 Expr *Op);
  CastKind PrepareScalarCast(ExprResult &src, QualType destType);

  /// Build an AltiVec or OpenCL literal.
  ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
                                SourceLocation RParenLoc, Expr *E,
                                TypeSourceInfo *TInfo);

  ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);

  ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
                                  ParsedType Ty,
                                  SourceLocation RParenLoc,
                                  Expr *InitExpr);

  ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
                                      TypeSourceInfo *TInfo,
                                      SourceLocation RParenLoc,
                                      Expr *LiteralExpr);

  ExprResult ActOnInitList(SourceLocation LBraceLoc,
                           MultiExprArg InitArgList,
                           SourceLocation RBraceLoc);

  ExprResult BuildInitList(SourceLocation LBraceLoc,
                           MultiExprArg InitArgList,
                           SourceLocation RBraceLoc);

  ExprResult ActOnDesignatedInitializer(Designation &Desig,
                                        SourceLocation EqualOrColonLoc,
                                        bool GNUSyntax,
                                        ExprResult Init);

private:
  static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);

public:
  ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
                        tok::TokenKind Kind,
                        Expr *LHSExpr, Expr *RHSExpr);
  ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
                        BinaryOperatorKind Opc,
                        Expr *LHSExpr, Expr *RHSExpr);
  ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                                Expr *LHSExpr, Expr *RHSExpr);
  void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);

  /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
  /// in the case of the GNU conditional expr extension.
  ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
                                SourceLocation ColonLoc,
                                Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);

  /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
  ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
                            LabelDecl *TheDecl);

  void ActOnStartStmtExpr();
  ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
                           SourceLocation RPLoc); // "({..})"
  // Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) 
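  ///
  /// Illustrative source form handled here (assuming ext_vector_type, in line
  /// with Clang's documentation of the builtin):
  /// \code
  ///   typedef float float4 __attribute__((ext_vector_type(4)));
  ///   typedef int   int4   __attribute__((ext_vector_type(4)));
  ///   int4 truncate(float4 v) { return __builtin_convertvector(v, int4); }
  /// \endcode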
  ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
                                    SourceLocation BuiltinLoc,
                                    SourceLocation RParenLoc);

  //===---------------------------- OpenCL Features -----------------------===//

  /// __builtin_astype(...)
  ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
                             SourceLocation BuiltinLoc,
                             SourceLocation RParenLoc);

  //===---------------------------- C++ Features --------------------------===//

  // Act on C++ namespaces
  Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
                               SourceLocation NamespaceLoc,
                               SourceLocation IdentLoc, IdentifierInfo *Ident,
                               SourceLocation LBrace,
                               const ParsedAttributesView &AttrList,
                               UsingDirectiveDecl *&UsingDecl);
  void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);

  NamespaceDecl *getStdNamespace() const;
  NamespaceDecl *getOrCreateStdNamespace();

  NamespaceDecl *lookupStdExperimentalNamespace();

  CXXRecordDecl *getStdBadAlloc() const;
  EnumDecl *getStdAlignValT() const;

private:
  // A cache representing whether we've fully checked the various comparison
  // category types stored in ASTContext. The bit-index corresponds to the
  // integer value of a ComparisonCategoryType enumerator.
  llvm::SmallBitVector FullyCheckedComparisonCategories;

  ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
                                         CXXScopeSpec &SS,
                                         ParsedType TemplateTypeTy,
                                         IdentifierInfo *MemberOrBase);

public:
  /// Lookup the specified comparison category types in the standard
  /// library, and check the VarDecls possibly returned by the operator<=>
  /// builtins for that type.
  ///
  /// \return The type of the comparison category type corresponding to the
  /// specified Kind, or a null type if an error occurs.
  QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
                                       SourceLocation Loc);

  /// Tests whether Ty is an instance of std::initializer_list and, if
  /// it is and Element is not NULL, assigns the element type to Element.
  bool isStdInitializerList(QualType Ty, QualType *Element);

  /// Looks for the std::initializer_list template and instantiates it
  /// with Element, or emits an error if it's not found.
  ///
  /// \returns The instantiated template, or null on error.
  QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);

  /// Determine whether Ctor is an initializer-list constructor, as
  /// defined in [dcl.init.list]p2.
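  ///
  /// For example (illustrative snippet; see [dcl.init.list]p2):
  /// \code
  ///   struct S {
  ///     S(std::initializer_list<int>);          // initializer-list ctor
  ///     S(std::initializer_list<int>, int = 0); // also one: the trailing
  ///                                             // parameter is defaulted
  ///   };
  /// \endcode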
  bool isInitListConstructor(const FunctionDecl *Ctor);

  Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
                            SourceLocation NamespcLoc, CXXScopeSpec &SS,
                            SourceLocation IdentLoc,
                            IdentifierInfo *NamespcName,
                            const ParsedAttributesView &AttrList);

  void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);

  Decl *ActOnNamespaceAliasDef(Scope *CurScope,
                               SourceLocation NamespaceLoc,
                               SourceLocation AliasLoc,
                               IdentifierInfo *Alias,
                               CXXScopeSpec &SS,
                               SourceLocation IdentLoc,
                               IdentifierInfo *Ident);

  void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
  bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
                            const LookupResult &PreviousDecls,
                            UsingShadowDecl *&PrevShadow);
  UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
                                        NamedDecl *Target,
                                        UsingShadowDecl *PrevDecl);

  bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
                                   bool HasTypenameKeyword,
                                   const CXXScopeSpec &SS,
                                   SourceLocation NameLoc,
                                   const LookupResult &Previous);
  bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
                               bool HasTypename,
                               const CXXScopeSpec &SS,
                               const DeclarationNameInfo &NameInfo,
                               SourceLocation NameLoc);

  NamedDecl *BuildUsingDeclaration(
      Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
      bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
      DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
      const ParsedAttributesView &AttrList, bool IsInstantiation);
  NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
                                ArrayRef<NamedDecl *> Expansions);

  bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);

  /// Given a derived-class using shadow declaration for a constructor and the
  /// corresponding base class constructor, find or create the implicit
  /// synthesized derived class constructor to use for this initialization.
  CXXConstructorDecl *
  findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
                            ConstructorUsingShadowDecl *DerivedShadow);

  Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
                              SourceLocation UsingLoc,
                              SourceLocation TypenameLoc, CXXScopeSpec &SS,
                              UnqualifiedId &Name, SourceLocation EllipsisLoc,
                              const ParsedAttributesView &AttrList);
  Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
                              MultiTemplateParamsArg TemplateParams,
                              SourceLocation UsingLoc, UnqualifiedId &Name,
                              const ParsedAttributesView &AttrList,
                              TypeResult Type, Decl *DeclFromDeclSpec);

  /// BuildCXXConstructExpr - Creates a complete call to a constructor,
  /// including handling of its default argument expressions.
  ///
  /// \param ConstructKind - a CXXConstructExpr::ConstructionKind
  ExprResult
  BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
                        NamedDecl *FoundDecl,
                        CXXConstructorDecl *Constructor, MultiExprArg Exprs,
                        bool HadMultipleCandidates, bool IsListInitialization,
                        bool IsStdInitListInitialization,
                        bool RequiresZeroInit, unsigned ConstructKind,
                        SourceRange ParenRange);

  /// Build a CXXConstructExpr whose constructor has already been resolved if
  /// it denotes an inherited constructor.
  ExprResult
  BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
                        CXXConstructorDecl *Constructor, bool Elidable,
                        MultiExprArg Exprs,
                        bool HadMultipleCandidates, bool IsListInitialization,
                        bool IsStdInitListInitialization,
                        bool RequiresZeroInit, unsigned ConstructKind,
                        SourceRange ParenRange);

  // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
  // the constructor can be elidable?
  ExprResult
  BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
                        NamedDecl *FoundDecl,
                        CXXConstructorDecl *Constructor, bool Elidable,
                        MultiExprArg Exprs,
                        bool HadMultipleCandidates, bool IsListInitialization,
                        bool IsStdInitListInitialization,
                        bool RequiresZeroInit, unsigned ConstructKind,
                        SourceRange ParenRange);

  ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);

  /// Instantiate or parse a C++ default argument expression as necessary.
  /// Return true on error.
  bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
                              ParmVarDecl *Param);

  /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
  /// the default expr if needed.
  ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
                                    FunctionDecl *FD,
                                    ParmVarDecl *Param);

  /// FinalizeVarWithDestructor - Prepare for calling destructor on the
  /// constructed variable.
  void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);

  /// Helper class that collects exception specifications for
  /// implicitly-declared special member functions.
  class ImplicitExceptionSpecification {
    // Pointer to allow copying
    Sema *Self;
    // We order exception specifications thus:
    // noexcept is the most restrictive, but is only used in C++11.
    // throw() comes next.
    // Then a throw(collected exceptions)
    // Finally no specification, which is expressed as noexcept(false).
    // throw(...) is used instead if any called function uses it.
    ExceptionSpecificationType ComputedEST;
    llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
    SmallVector<QualType, 4> Exceptions;

    void ClearExceptions() {
      ExceptionsSeen.clear();
      Exceptions.clear();
    }

  public:
    explicit ImplicitExceptionSpecification(Sema &Self)
        : Self(&Self), ComputedEST(EST_BasicNoexcept) {
      if (!Self.getLangOpts().CPlusPlus11)
        ComputedEST = EST_DynamicNone;
    }

    /// Get the computed exception specification type.
    ExceptionSpecificationType getExceptionSpecType() const {
      assert(!isComputedNoexcept(ComputedEST) &&
             "noexcept(expr) should not be a possible result");
      return ComputedEST;
    }

    /// The number of exceptions in the exception specification.
    unsigned size() const { return Exceptions.size(); }

    /// The set of exceptions in the exception specification.
    const QualType *data() const { return Exceptions.data(); }

    /// Integrate another called method into the collected data.
    void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

    /// Integrate an invoked expression into the collected data.
    void CalledExpr(Expr *E);

    /// Overwrite an EPI's exception specification with this
    /// computed exception specification.
    FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
      FunctionProtoType::ExceptionSpecInfo ESI;
      ESI.Type = getExceptionSpecType();
      if (ESI.Type == EST_Dynamic) {
        ESI.Exceptions = Exceptions;
      } else if (ESI.Type == EST_None) {
        /// C++11 [except.spec]p14:
        /// The exception-specification is noexcept(false) if the set of
        /// potential exceptions of the special member function contains "any"
        ESI.Type = EST_NoexceptFalse;
        ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
                                                     tok::kw_false).get();
      }
      return ESI;
    }
  };

  /// Determine what sort of exception specification a defaulted
  /// default constructor of a class will have.
  ImplicitExceptionSpecification
  ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
                                           CXXMethodDecl *MD);

  /// Determine what sort of exception specification a defaulted
  /// copy constructor of a class will have, and whether the parameter
  /// will be const.
ImplicitExceptionSpecification ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// copy assignment operator of a class will have, and whether the /// parameter will be const. ImplicitExceptionSpecification ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// assignment operator of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// destructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(SourceLocation Loc, CXXConstructorDecl *CD); /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. 
/// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// Check a completed declaration of an implicit special member. void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD); /// Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. 
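  ///
  /// For example (illustrative, deliberately ill-formed):
  /// \code
  ///   struct A {
  ///     static auto f() -> decltype(this); // error: 'this' in a static member
  ///   };
  /// \endcode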
  bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);

  /// Whether 'this' shows up in the exception specification of a static
  /// member function.
  bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);

  /// Check whether 'this' shows up in the attributes of the given
  /// static member function.
  ///
  /// \returns true if an error occurred.
  bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);

  /// MaybeBindToTemporary - If the passed in expression has a record type with
  /// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
  /// it simply returns the passed in expression.
  ExprResult MaybeBindToTemporary(Expr *E);

  bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
                               MultiExprArg ArgsPtr,
                               SourceLocation Loc,
                               SmallVectorImpl<Expr*> &ConvertedArgs,
                               bool AllowExplicit = false,
                               bool IsListInitialization = false);

  ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
                                          SourceLocation NameLoc,
                                          IdentifierInfo &Name);

  ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
                                Scope *S, CXXScopeSpec &SS,
                                bool EnteringContext);
  ParsedType getDestructorName(SourceLocation TildeLoc,
                               IdentifierInfo &II, SourceLocation NameLoc,
                               Scope *S, CXXScopeSpec &SS,
                               ParsedType ObjectType,
                               bool EnteringContext);

  ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
                                          ParsedType ObjectType);

  // Checks that reinterpret casts don't have undefined behavior.
  void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
                                      bool IsDereference, SourceRange Range);

  /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
  ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
                               tok::TokenKind Kind,
                               SourceLocation LAngleBracketLoc,
                               Declarator &D,
                               SourceLocation RAngleBracketLoc,
                               SourceLocation LParenLoc,
                               Expr *E,
                               SourceLocation RParenLoc);

  ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
                               tok::TokenKind Kind,
                               TypeSourceInfo *Ty,
                               Expr *E,
                               SourceRange AngleBrackets,
                               SourceRange Parens);

  ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
                                     ExprResult Operand,
                                     SourceLocation RParenLoc);

  ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
                                     Expr *Operand, SourceLocation RParenLoc);

  ExprResult BuildCXXTypeId(QualType TypeInfoType,
                            SourceLocation TypeidLoc,
                            TypeSourceInfo *Operand,
                            SourceLocation RParenLoc);
  ExprResult BuildCXXTypeId(QualType TypeInfoType,
                            SourceLocation TypeidLoc,
                            Expr *Operand,
                            SourceLocation RParenLoc);

  /// ActOnCXXTypeid - Parse typeid( something ).
  ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc,
                            bool isType, void *TyOrExpr,
                            SourceLocation RParenLoc);

  ExprResult BuildCXXUuidof(QualType TypeInfoType,
                            SourceLocation TypeidLoc,
                            TypeSourceInfo *Operand,
                            SourceLocation RParenLoc);
  ExprResult BuildCXXUuidof(QualType TypeInfoType,
                            SourceLocation TypeidLoc,
                            Expr *Operand,
                            SourceLocation RParenLoc);

  /// ActOnCXXUuidof - Parse __uuidof( something ).
  ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc,
                            bool isType, void *TyOrExpr,
                            SourceLocation RParenLoc);

  /// Handle a C++1z fold-expression: ( expr op ... op expr ).
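  ///
  /// For example (illustrative snippet):
  /// \code
  ///   template <typename... Ts> auto sum(Ts... ts) {
  ///     return (ts + ...); // unary right fold over '+'
  ///   }
  /// \endcode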
  ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
                              tok::TokenKind Operator,
                              SourceLocation EllipsisLoc, Expr *RHS,
                              SourceLocation RParenLoc);
  ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
                              BinaryOperatorKind Operator,
                              SourceLocation EllipsisLoc, Expr *RHS,
                              SourceLocation RParenLoc,
                              Optional<unsigned> NumExpansions);
  ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
                                   BinaryOperatorKind Operator);

  /// ActOnCXXThis - Parse 'this' pointer.
  ExprResult ActOnCXXThis(SourceLocation loc);

  /// Build a CXXThisExpr and mark it referenced in the current context.
  Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
  void MarkThisReferenced(CXXThisExpr *This);

  /// Try to retrieve the type of the 'this' pointer.
  ///
  /// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
  QualType getCurrentThisType();

  /// When non-NULL, the C++ 'this' expression is allowed despite the
  /// current context not being a non-static member function. In such cases,
  /// this provides the type used for 'this'.
  QualType CXXThisTypeOverride;

  /// RAII object used to temporarily allow the C++ 'this' expression
  /// to be used, with the given qualifiers on the current class type.
  class CXXThisScopeRAII {
    Sema &S;
    QualType OldCXXThisTypeOverride;
    bool Enabled;

  public:
    /// Introduce a new scope where 'this' may be allowed (when enabled),
    /// using the given declaration (which is either a class template or a
    /// class) along with the qualifiers placed on '*this'.
    CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
                     bool Enabled = true);

    ~CXXThisScopeRAII();
  };

  /// Make sure the value of 'this' is actually available in the current
  /// context, if it is a potentially evaluated context.
  ///
  /// \param Loc The location at which the capture of 'this' occurs.
  ///
  /// \param Explicit Whether 'this' is explicitly captured in a lambda
  /// capture list.
  ///
  /// \param FunctionScopeIndexToStopAt If non-null, it points to the index
  /// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
  /// This is useful when enclosing lambdas must speculatively capture
  /// 'this' that may or may not be used in certain specializations of
  /// a nested generic lambda (depending on whether the name resolves to
  /// a non-static member function or a static function).
  ///
  /// \returns true on failure, false on success.
  bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
      bool BuildAndDiagnose = true,
      const unsigned *const FunctionScopeIndexToStopAt = nullptr,
      bool ByCopy = false);

  /// Determine whether the given type is the type of *this that is used
  /// outside of the body of a member function for a type that is currently
  /// being defined.
  bool isThisOutsideMemberFunctionBody(QualType BaseType);

  /// ActOnCXXBoolLiteral - Parse {true,false} literals.
  ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

  /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
  ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

  ExprResult
  ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
                                 SourceLocation AtLoc, SourceLocation RParen);

  /// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
  ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);

  /// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. 
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. 
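/// A typical, hypothetical use after building a subexpression \c E that may
/// have created temporaries:
/// \code
///   Expr *Full = MaybeCreateExprWithCleanups(E);
/// \endcode
/// If no cleanups are pending, \c Full is simply \c E again.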
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. 
/// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case, no error messages are emitted. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed to by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl * startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind, Optional<std::pair<unsigned, Decl *>> Mangling = None); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, EllipsisLoc, None, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Add an init-capture to a lambda scope. 
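/// A condensed, hypothetical flow for an init-capture such as `[x = init]`,
/// using the functions declared above (error handling omitted; the concrete
/// value for \c InitStyle is illustrative only):
/// \code
///   Expr *Init = /* parsed initializer */ nullptr;
///   QualType T = buildLambdaInitCaptureInitialization(
///       Loc, /*ByRef=*/false, EllipsisLoc, None, Id,
///       /*DirectInit=*/false, Init);
///   VarDecl *Var = createLambdaInitCaptureVarDecl(
///       Loc, T, EllipsisLoc, Id, /*InitStyle=*/0, Init);
///   addInitCapture(LSI, Var);
/// \endcode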
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief This is called after parsing the explicit template parameter list /// on a lambda (if it exists) in C++2a. void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. 
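/// For reference, these conversions are what make code like the following
/// well-formed (the block-pointer case requires Clang's blocks extension):
/// \code
///   auto L = [](int x) { return x + 1; };
///   int (*fp)(int) = L;   // lambda-to-function-pointer conversion
///   int (^bp)(int) = L;   // lambda-to-block-pointer conversion
/// \endcode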
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void 
ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. 
/// The caller must /// ensure that referenceDLLExportedClassMethods is called at some point later /// when all outer classes of Class are complete. void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); /// Add gsl::Pointer attribute to std::container::iterator /// \param ND The declaration that introduces the name /// std::container::iterator. \param UnderlyingRecord The record named by ND. void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord); /// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types. void inferGslOwnerPointerAttribute(CXXRecordDecl *Record); /// Add [[gsl::Pointer]] attributes for std:: types. void inferGslPointerAttribute(TypedefNameDecl *TD); void CheckCompletedCXXClass(CXXRecordDecl *Record); /// Check that the C++ class annotated with "trivial_abi" satisfies all the /// conditions that are needed for the attribute to have an effect. void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier.
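/// A base-specifier is one element of the base-clause of a class definition,
/// e.g. `public virtual Base` in:
/// \code
///   class Derived : public virtual Base { /* ... */ };
/// \endcode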
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4.
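/// For example:
/// \code
///   struct A { virtual void f() final; };
///   struct B : A { void f(); }; // ill-formed: overrides a 'final' function
/// \endcode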
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass, QualType BaseType); bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType objectType); void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... 
Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, SourceLocation TemplateKWLoc = SourceLocation(), AssumedTemplateKind *ATK = nullptr); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. 
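/// For example, in:
/// \code
///   template<typename T> struct X { X(T); };
///   X(int) -> X<int>; // deduction-guide declaration; 'X' is the name queried
/// \endcode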
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. 
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, ConceptDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, 
unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// Check that the given template arguments can be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to /// contain the converted forms of the template arguments as written. /// Otherwise, \p TemplateArgs will not be modified. /// /// \returns true if an error occurred, false otherwise. bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations.
/// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
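/// For example, the following (illustrative) metaprogram names such a type:
/// \code
///   template<typename MetaFun, typename T1, typename T2>
///   struct apply2 {
///     typedef typename MetaFun::template apply<T1, T2>::type type;
///   };
/// \endcode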
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); // Concepts Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// An arbitrary expression. UPPC_Expression = 0, /// The base type of a class type. UPPC_BaseType, /// The type of an arbitrary declaration. UPPC_DeclarationType, /// The type of a data member. UPPC_DataMemberType, /// The size of a bit-field. UPPC_BitFieldWidth, /// The expression in a static assertion. UPPC_StaticAssertExpression, /// The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// The enumerator value. UPPC_EnumeratorValue, /// A using declaration. UPPC_UsingDeclaration, /// A friend declaration. UPPC_FriendDeclaration, /// A declaration qualifier. UPPC_DeclarationQualifier, /// An initializer. UPPC_Initializer, /// A default argument. UPPC_DefaultArgument, /// The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// The type of an exception. UPPC_ExceptionType, /// Partial specialization. UPPC_PartialSpecialization, /// Microsoft __if_exists. UPPC_IfExists, /// Microsoft __if_not_exists. UPPC_IfNotExists, /// Lambda expression. UPPC_Lambda, /// Block expression. UPPC_Block }; /// Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostic should be emitted.
/// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. 
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. 
/// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. 
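  ///
  /// (Illustrative example added for exposition; not part of the original
  /// header. Conflicting deductions for the same parameter fail with
  /// TDK_Inconsistent:)
  /// \code
  /// template<typename T> void h(T, T);
  /// h(1, 2.0); // T deduced as both 'int' and 'double' -> TDK_Inconsistent
  /// \endcode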
enum TemplateDeductionResult {
    /// Template argument deduction was successful.
    TDK_Success = 0,
    /// The declaration was invalid; do nothing.
    TDK_Invalid,
    /// Template argument deduction exceeded the maximum template
    /// instantiation depth (which has already been diagnosed).
    TDK_InstantiationDepth,
    /// Template argument deduction did not deduce a value
    /// for every template parameter.
    TDK_Incomplete,
    /// Template argument deduction did not deduce a value for every
    /// expansion of an expanded template parameter pack.
    TDK_IncompletePack,
    /// Template argument deduction produced inconsistent
    /// deduced values for the given template parameter.
    TDK_Inconsistent,
    /// Template argument deduction failed due to inconsistent
    /// cv-qualifiers on a template parameter type that would
    /// otherwise be deduced, e.g., we tried to deduce T in "const T"
    /// but were given a non-const "X".
    TDK_Underqualified,
    /// Substitution of the deduced template argument values
    /// resulted in an error.
    TDK_SubstitutionFailure,
    /// After substituting deduced template arguments, a dependent
    /// parameter type did not match the corresponding argument.
    TDK_DeducedMismatch,
    /// After substituting deduced template arguments, an element of
    /// a dependent parameter type did not match the corresponding element
    /// of the corresponding argument (when deducing from an initializer list).
    TDK_DeducedMismatchNested,
    /// A non-dependent component of the parameter did not match the
    /// corresponding component of the argument.
    TDK_NonDeducedMismatch,
    /// When performing template argument deduction for a function
    /// template, there were too many call arguments.
    TDK_TooManyArguments,
    /// When performing template argument deduction for a function
    /// template, there were too few call arguments.
    TDK_TooFewArguments,
    /// The explicitly-specified template arguments were not valid
    /// template arguments for the given template.
    TDK_InvalidExplicitArguments,
    /// Checking non-dependent argument conversions failed.
    TDK_NonDependentConversionFailure,
    /// Deduction failed; that's all we know.
    TDK_MiscellaneousDeductionFailure,
    /// CUDA Target attributes do not match.
    TDK_CUDATargetMismatch
  };

  TemplateDeductionResult
  DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
                          const TemplateArgumentList &TemplateArgs,
                          sema::TemplateDeductionInfo &Info);

  TemplateDeductionResult
  DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
                          const TemplateArgumentList &TemplateArgs,
                          sema::TemplateDeductionInfo &Info);

  TemplateDeductionResult SubstituteExplicitTemplateArguments(
      FunctionTemplateDecl *FunctionTemplate,
      TemplateArgumentListInfo &ExplicitTemplateArgs,
      SmallVectorImpl<DeducedTemplateArgument> &Deduced,
      SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
      sema::TemplateDeductionInfo &Info);

  /// A function argument from which we performed template argument
  /// deduction for a call.
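  ///
  /// (Illustrative example added for exposition; not part of the original
  /// header. One such record is kept per call argument used for deduction:)
  /// \code
  /// template<typename T> void g(T, int);
  /// g(3.14, 0); // T = double; each argument yields an OriginalCallArg
  /// \endcode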
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. 
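  ///
  /// (Illustrative example added for exposition; not part of the original
  /// header. C++17 class template argument deduction in action:)
  /// \code
  /// template<typename T> struct Pair { Pair(T, T); };
  /// Pair p(1, 2); // uses the implicit deduction guide Pair(T, T) -> Pair<T>
  /// \endcode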
void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. 
      /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
      /// provide the template arguments as specified.
      DefaultFunctionArgumentInstantiation,

      /// We are substituting explicit template arguments provided for
      /// a function template. The entity is a FunctionTemplateDecl.
      ExplicitTemplateArgumentSubstitution,

      /// We are substituting template arguments determined as part of
      /// template argument deduction for either a class template
      /// partial specialization or a function template. The
      /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
      /// a TemplateDecl.
      DeducedTemplateArgumentSubstitution,

      /// We are substituting prior template arguments into a new
      /// template parameter. The template parameter itself is either a
      /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
      PriorTemplateArgumentSubstitution,

      /// We are checking the validity of a default template argument that
      /// has been used when naming a template-id.
      DefaultTemplateArgumentChecking,

      /// We are computing the exception specification for a defaulted special
      /// member function.
      ExceptionSpecEvaluation,

      /// We are instantiating the exception specification for a function
      /// template which was deferred until it was needed.
      ExceptionSpecInstantiation,

      /// We are declaring an implicit special member function.
      DeclaringSpecialMember,

      /// We are defining a synthesized function (such as a defaulted special
      /// member).
      DefiningSynthesizedFunction,

      /// Added for template instantiation observation.
      /// Memoization means we are _not_ instantiating a template because
      /// it is already instantiated (but we entered a context where we
      /// would have had to if it was not already instantiated).
      Memoization
    } Kind;

    /// Was the enclosing context a non-instantiation SFINAE context?
    bool SavedInNonInstantiationSFINAEContext;

    /// The point of instantiation or synthesis within the source code.
    SourceLocation PointOfInstantiation;

    /// The entity that is being synthesized.
    Decl *Entity;

    /// The template (or partial specialization) in which we are
    /// performing the instantiation, for substitutions of prior template
    /// arguments.
    NamedDecl *Template;

    /// The list of template arguments we are substituting, if they
    /// are not part of the entity.
    const TemplateArgument *TemplateArgs;

    // FIXME: Wrap this union around more members, or perhaps store the
    // kind-specific members in the RAII object owning the context.
    union {
      /// The number of template arguments in TemplateArgs.
      unsigned NumTemplateArgs;

      /// The special member being declared or defined.
      CXXSpecialMember SpecialMember;
    };

    ArrayRef<TemplateArgument> template_arguments() const {
      assert(Kind != DeclaringSpecialMember);
      return {TemplateArgs, NumTemplateArgs};
    }

    /// The template deduction info object associated with the
    /// substitution or checking of explicit or deduced template arguments.
    sema::TemplateDeductionInfo *DeductionInfo;

    /// The source range that covers the construct that causes
    /// the instantiation, e.g., the template-id that causes a class
    /// template instantiation.
    SourceRange InstantiationRange;

    CodeSynthesisContext()
        : Kind(TemplateInstantiation),
          SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
          Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
          DeductionInfo(nullptr) {}

    /// Determines whether this template is an actual instantiation
    /// that should be counted toward the maximum instantiation depth.
    bool isInstantiationRecord() const;
  };

  /// List of active code synthesis contexts.
  ///
  /// This vector is treated as a stack. As synthesis of one entity requires
  /// synthesis of another, additional contexts are pushed onto the stack.
  SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;

  /// Specializations whose definitions are currently being instantiated.
  llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;

  /// Non-dependent types used in templates that have already been instantiated
  /// by some template instantiation.
  llvm::DenseSet<QualType> InstantiatedNonDependentTypes;

  /// Extra modules inspected when performing a lookup during a template
  /// instantiation. Computed lazily.
  SmallVector<Module*, 16> CodeSynthesisContextLookupModules;

  /// Cache of additional modules that should be used for name lookup
  /// within the current template instantiation. Computed lazily; use
  /// getLookupModules() to get a complete set.
  llvm::DenseSet<Module*> LookupModulesCache;

  /// Get the set of additional modules that should be checked during
  /// name lookup. A module and its imports become visible when instantiating a
  /// template defined within it.
  llvm::DenseSet<Module*> &getLookupModules();

  /// Map from the most recent declaration of a namespace to the most
  /// recent visible declaration of that namespace.
  llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;

  /// Whether we are in a SFINAE context that is not associated with
  /// template instantiation.
  ///
  /// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
  /// of a template instantiation or template argument deduction.
  bool InNonInstantiationSFINAEContext;

  /// The number of \p CodeSynthesisContexts that are not template
  /// instantiations and, therefore, should not be counted as part of the
  /// instantiation depth.
  ///
  /// When the instantiation depth reaches the user-configurable limit
  /// \p LangOptions::InstantiationDepth we will abort instantiation.
  // FIXME: Should we have a similar limit for other forms of synthesis?
  unsigned NonInstantiationEntries;

  /// The depth of the context stack at the point when the most recent
  /// error or warning was produced.
  ///
  /// This value is used to suppress printing of redundant context stacks
  /// when there are multiple errors or warnings in the same instantiation.
  // FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
  unsigned LastEmittedCodeSynthesisContextDepth = 0;

  /// The template instantiation callbacks to trace or track
  /// instantiations (objects can be chained).
  ///
  /// These callbacks are used to print, trace or track template
  /// instantiations as they are being constructed.
  std::vector<std::unique_ptr<TemplateInstantiationCallback>>
      TemplateInstCallbacks;

  /// The current index into pack expansion arguments that will be
  /// used for substitution of parameter packs.
  ///
  /// The pack expansion index will be -1 to indicate that parameter packs
  /// should be instantiated as themselves. Otherwise, the index specifies
  /// which argument within the parameter pack will be used for substitution.
  int ArgumentPackSubstitutionIndex;

  /// RAII object used to change the argument pack substitution index
  /// within a \c Sema object.
  ///
  /// See \c ArgumentPackSubstitutionIndex for more information.
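  ///
  /// (Hypothetical usage sketch added for exposition; not part of the
  /// original header. \c SemaRef and \c NumExpansions stand in for locals of
  /// a caller expanding a pack element by element:)
  /// \code
  /// for (unsigned I = 0; I != *NumExpansions; ++I) {
  ///   Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, I);
  ///   // ... transform the expansion pattern using the I'th pack element ...
  /// }
  /// \endcode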
class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and evaluates true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateParameter Param, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// Determines whether we have exceeded the maximum /// recursive template instantiations. bool isInvalid() const { return Invalid; } /// Determine whether we are already instantiating this /// specialization in some surrounding active instantiation. bool isAlreadyInstantiating() const { return AlreadyInstantiating; } private: Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); /// Determine whether we are currently performing template instantiation. bool inTemplateInstantiation() const { return CodeSynthesisContexts.size() > NonInstantiationEntries; } void PrintContextStack() { if (!CodeSynthesisContexts.empty() && CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) { PrintInstantiationStack(); LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size(); } if (PragmaAttributeCurrentTargetDecl) PrintPragmaAttributeInstantiationPoint(); } void PrintInstantiationStack(); void PrintPragmaAttributeInstantiationPoint(); /// Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. 
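  ///
  /// (Illustrative example added for exposition; not part of the original
  /// header. A substitution failure in a SFINAE context is not an error:)
  /// \code
  /// template<typename T> auto f(T t) -> decltype(t.foo()); // SFINAE'd away
  /// template<typename T> void f(...);                      // chosen instead
  /// \endcode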
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } /// RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; bool PrevLastDiagnosticIgnored; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE), PrevLastDiagnosticIgnored( SemaRef.getDiagnostics().isLastDiagnosticIgnored()) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; SemaRef.getDiagnostics().setLastDiagnosticIgnored( PrevLastDiagnosticIgnored); } /// Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// The number of typos corrected by CorrectTypo. unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". 
/// It is not necessarily
  /// the point of instantiation (which will be either before or after the
  /// namespace-scope declaration that triggered this implicit instantiation).
  /// However, it is the location that diagnostics should generally refer to,
  /// because users will need to know what code triggered the instantiation.
  typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;

  /// The queue of implicit template instantiations that are required
  /// but have not yet been performed.
  std::deque<PendingImplicitInstantiation> PendingInstantiations;

  /// Queue of implicit template instantiations that cannot be performed
  /// eagerly.
  SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;

  class GlobalEagerInstantiationScope {
  public:
    GlobalEagerInstantiationScope(Sema &S, bool Enabled)
        : S(S), Enabled(Enabled) {
      if (!Enabled) return;

      SavedPendingInstantiations.swap(S.PendingInstantiations);
      SavedVTableUses.swap(S.VTableUses);
    }

    void perform() {
      if (Enabled) {
        S.DefineUsedVTables();
        S.PerformPendingInstantiations();
      }
    }

    ~GlobalEagerInstantiationScope() {
      if (!Enabled) return;

      // Restore the set of pending vtables.
      assert(S.VTableUses.empty() &&
             "VTableUses should be empty before it is discarded.");
      S.VTableUses.swap(SavedVTableUses);

      // Restore the set of pending implicit instantiations.
      assert(S.PendingInstantiations.empty() &&
             "PendingInstantiations should be empty before it is discarded.");
      S.PendingInstantiations.swap(SavedPendingInstantiations);
    }

  private:
    Sema &S;
    SmallVector<VTableUse, 16> SavedVTableUses;
    std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
    bool Enabled;
  };

  /// The queue of implicit template instantiations that are required
  /// and must be performed within the current local scope.
  ///
  /// This queue is only used for member functions of local classes in
  /// templates, which must be instantiated in the same scope as their
  /// enclosing function, so that they can reference function-local
  /// types, static variables, enumerators, etc.
  std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;

  class LocalEagerInstantiationScope {
  public:
    LocalEagerInstantiationScope(Sema &S) : S(S) {
      SavedPendingLocalImplicitInstantiations.swap(
          S.PendingLocalImplicitInstantiations);
    }

    void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }

    ~LocalEagerInstantiationScope() {
      assert(S.PendingLocalImplicitInstantiations.empty() &&
             "there shouldn't be any pending local implicit instantiations");
      SavedPendingLocalImplicitInstantiations.swap(
          S.PendingLocalImplicitInstantiations);
    }

  private:
    Sema &S;
    std::deque<PendingImplicitInstantiation>
        SavedPendingLocalImplicitInstantiations;
  };

  /// A helper class for building up ExtParameterInfos.
  class ExtParameterInfoBuilder {
    SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
    bool HasInteresting = false;

  public:
    /// Set the ExtParameterInfo for the parameter at the given index.
    void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
      assert(Infos.size() <= index);
      Infos.resize(index);
      Infos.push_back(info);

      if (!HasInteresting)
        HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
    }

    /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
    /// ExtParameterInfo array we've built up.
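    ///
    /// (Hypothetical usage sketch added for exposition; not part of the
    /// original header. \c EPI and \c NumParams stand in for a caller's
    /// ExtProtoInfo and final parameter count:)
    /// \code
    /// Sema::ExtParameterInfoBuilder ParamInfos;
    /// ParamInfos.set(0,
    ///     FunctionProtoType::ExtParameterInfo().withIsNoEscape(true));
    /// EPI.ExtParameterInfos = ParamInfos.getPointerOrNull(NumParams);
    /// \endcode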
const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, Qualifiers ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. 
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl 
*Function, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); VarDecl *getVarTemplateSpecialization( VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs, const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. 
enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl *const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryInterface( SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc, const ParsedAttributesView &AttrList); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc, ArrayRef<IdentifierLocPair> IdentList, const ParsedAttributesView &attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); void DiagnoseTypeArgsAndProtocols(IdentifierInfo 
*ProtocolId,
                                     SourceLocation ProtocolLoc,
                                     IdentifierInfo *TypeArgId,
                                     SourceLocation TypeArgLoc,
                                     bool SelectProtocolFirst = false);

  /// Given a list of identifiers (and their locations), resolve the
  /// names to either Objective-C protocol qualifiers or type
  /// arguments, as appropriate.
  void actOnObjCTypeArgsOrProtocolQualifiers(
         Scope *S,
         ParsedType baseType,
         SourceLocation lAngleLoc,
         ArrayRef<IdentifierInfo *> identifiers,
         ArrayRef<SourceLocation> identifierLocs,
         SourceLocation rAngleLoc,
         SourceLocation &typeArgsLAngleLoc,
         SmallVectorImpl<ParsedType> &typeArgs,
         SourceLocation &typeArgsRAngleLoc,
         SourceLocation &protocolLAngleLoc,
         SmallVectorImpl<Decl *> &protocols,
         SourceLocation &protocolRAngleLoc,
         bool warnOnIncompleteProtocols);

  /// Build an Objective-C protocol-qualified 'id' type where no
  /// base type was specified.
  TypeResult actOnObjCProtocolQualifierType(
               SourceLocation lAngleLoc,
               ArrayRef<Decl *> protocols,
               ArrayRef<SourceLocation> protocolLocs,
               SourceLocation rAngleLoc);

  /// Build a specialized and/or protocol-qualified Objective-C type.
  TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
               Scope *S,
               SourceLocation Loc,
               ParsedType BaseType,
               SourceLocation TypeArgsLAngleLoc,
               ArrayRef<ParsedType> TypeArgs,
               SourceLocation TypeArgsRAngleLoc,
               SourceLocation ProtocolLAngleLoc,
               ArrayRef<Decl *> Protocols,
               ArrayRef<SourceLocation> ProtocolLocs,
               SourceLocation ProtocolRAngleLoc);

  /// Build an Objective-C type parameter type.
  QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
                                  SourceLocation ProtocolLAngleLoc,
                                  ArrayRef<ObjCProtocolDecl *> Protocols,
                                  ArrayRef<SourceLocation> ProtocolLocs,
                                  SourceLocation ProtocolRAngleLoc,
                                  bool FailOnError = false);

  /// Build an Objective-C object pointer type.
  QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc,
                               SourceLocation TypeArgsLAngleLoc,
                               ArrayRef<TypeSourceInfo *> TypeArgs,
                               SourceLocation TypeArgsRAngleLoc,
                               SourceLocation ProtocolLAngleLoc,
                               ArrayRef<ObjCProtocolDecl *> Protocols,
                               ArrayRef<SourceLocation> ProtocolLocs,
                               SourceLocation ProtocolRAngleLoc,
                               bool FailOnError = false);

  /// Ensure attributes are consistent with type.
  ///
  /// \param [in, out] Attributes The attributes to check; they will
  /// be modified to be consistent with \p PropertyTy.
  void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
                                   SourceLocation Loc,
                                   unsigned &Attributes,
                                   bool propertyInPrimaryClass);

  /// Process the specified property declaration and create decls for the
  /// setters and getters as needed.
/// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. 
ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. 
enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispAttr::Mode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, DeclaratorDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . 
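  ///
  /// (Illustrative example added for exposition; not part of the original
  /// header:)
  /// \code
  /// #pragma GCC visibility push(hidden)
  /// void helper();              // receives hidden visibility
  /// #pragma GCC visibility pop
  /// \endcode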
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
                             SourceLocation PragmaLoc);

  NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
                                 SourceLocation Loc);
  void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);

  /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
  void ActOnPragmaWeakID(IdentifierInfo* WeakName,
                         SourceLocation PragmaLoc,
                         SourceLocation WeakNameLoc);

  /// ActOnPragmaRedefineExtname - Called on well formed
  /// \#pragma redefine_extname oldname newname.
  void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
                                  IdentifierInfo* AliasName,
                                  SourceLocation PragmaLoc,
                                  SourceLocation WeakNameLoc,
                                  SourceLocation AliasNameLoc);

  /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
  void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
                            IdentifierInfo* AliasName,
                            SourceLocation PragmaLoc,
                            SourceLocation WeakNameLoc,
                            SourceLocation AliasNameLoc);

  /// ActOnPragmaFPContract - Called on well formed
  /// \#pragma {STDC,OPENCL} FP_CONTRACT and
  /// \#pragma clang fp contract
  void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);

  /// ActOnPragmaFenvAccess - Called on well formed
  /// \#pragma STDC FENV_ACCESS
  void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC);

  /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
  /// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
  void AddAlignmentAttributesForRecord(RecordDecl *RD);

  /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
  void AddMsStructLayoutForRecord(RecordDecl *RD);

  /// FreePackedContext - Deallocate and null out PackContext.
  void FreePackedContext();

  /// PushNamespaceVisibilityAttr - Note that we've entered a
  /// namespace with a visibility attribute.
  void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
                                   SourceLocation Loc);

  /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
  /// add an appropriate visibility attribute.
  void AddPushedVisibilityAttribute(Decl *RD);

  /// PopPragmaVisibility - Pop the top element of the visibility stack; used
  /// for '\#pragma GCC visibility' and visibility attributes on namespaces.
  void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);

  /// FreeVisContext - Deallocate and null out VisContext.
  void FreeVisContext();

  /// AddCFAuditedAttribute - Check whether we're currently within
  /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
  /// the appropriate attribute.
  void AddCFAuditedAttribute(Decl *D);

  void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
                                     SourceLocation PragmaLoc,
                                     attr::ParsedSubjectMatchRuleSet Rules);
  void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
                                     const IdentifierInfo *Namespace);

  /// Called on well-formed '\#pragma clang attribute pop'.
  void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
                               const IdentifierInfo *Namespace);

  /// Adds the attributes that have been specified using the
  /// '\#pragma clang attribute push' directives to the given declaration.
  void AddPragmaAttributes(Scope *S, Decl *D);

  void DiagnoseUnterminatedPragmaAttribute();

  /// Called on well formed \#pragma clang optimize.
  void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);

  /// Get the location for the currently active "\#pragma clang optimize
  /// off". If this location is invalid, then the state of the pragma is "on".
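  ///
  /// (Illustrative example added for exposition; not part of the original
  /// header:)
  /// \code
  /// #pragma clang optimize off
  /// void not_optimized();      // declarations here are built unoptimized
  /// #pragma clang optimize on
  /// \endcode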
  /// Get the location for the currently active "\#pragma clang optimize
  /// off". If this location is invalid, then the state of the pragma is "on".
  SourceLocation getOptimizeOffPragmaLocation() const {
    return OptimizeOffPragmaLocation;
  }

  /// Only called on function definitions; if there is a pragma in scope
  /// with the effect of a range-based optnone, consider marking the function
  /// with attribute optnone.
  void AddRangeBasedOptnone(FunctionDecl *FD);

  /// Adds the 'optnone' attribute to the function declaration if there
  /// are no conflicts; Loc represents the location causing the 'optnone'
  /// attribute to be added (usually because of a pragma).
  void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);

  /// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
  void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
                      unsigned SpellingListIndex, bool IsPackExpansion);
  void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
                      unsigned SpellingListIndex, bool IsPackExpansion);

  /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
  /// declaration.
  void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
                            unsigned SpellingListIndex);

  /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
  /// declaration.
  void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr,
                         unsigned SpellingListIndex);

  /// AddAlignValueAttr - Adds an align_value attribute to a particular
  /// declaration.
  void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
                         unsigned SpellingListIndex);

  /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
  /// declaration.
  void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
                           Expr *MinBlocks, unsigned SpellingListIndex);

  /// AddModeAttr - Adds a mode attribute to a particular declaration.
  void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name,
                   unsigned SpellingListIndex, bool InInstantiation = false);

  void AddParameterABIAttr(SourceRange AttrRange, Decl *D, ParameterABI ABI,
                           unsigned SpellingListIndex);

  enum class RetainOwnershipKind { NS, CF, OS };
  void AddXConsumedAttr(Decl *D, SourceRange SR, unsigned SpellingIndex,
                        RetainOwnershipKind K, bool IsTemplateInstantiation);

  /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
  /// attribute to a particular declaration.
  void addAMDGPUFlatWorkGroupSizeAttr(SourceRange AttrRange, Decl *D,
                                      Expr *Min, Expr *Max,
                                      unsigned SpellingListIndex);
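  // Illustrative sketch: attribute processing for
  // '__attribute__((aligned(16)))' would land in AddAlignedAttr roughly as
  // below; AL is a hypothetical ParsedAttr and AlignmentExpr the parsed '16'.
  //
  //   S.AddAlignedAttr(AL.getRange(), D, AlignmentExpr,
  //                    AL.getAttributeSpellingListIndex(),
  //                    /*IsPackExpansion=*/false);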
  /// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
  /// particular declaration.
  void addAMDGPUWavesPerEUAttr(SourceRange AttrRange, Decl *D, Expr *Min,
                               Expr *Max, unsigned SpellingListIndex);

  bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);

  //===--------------------------------------------------------------------===//
  // C++ Coroutines TS
  //
  bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
                               StringRef Keyword);
  ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
  ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
  StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);

  ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
                                      bool IsImplicit = false);
  ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
                                        UnresolvedLookupExpr *Lookup);
  ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
  StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
                               bool IsImplicit = false);
  StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
  bool buildCoroutineParameterMoves(SourceLocation Loc);
  VarDecl *buildCoroutinePromise(SourceLocation Loc);
  void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
  ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
                                           SourceLocation FuncLoc);

  //===--------------------------------------------------------------------===//
  // OpenCL extensions.
  //
private:
  std::string CurrOpenCLExtension;
  /// Extensions required by an OpenCL type.
  llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
  /// Extensions required by an OpenCL declaration.
  llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;

public:
  llvm::StringRef getCurrentOpenCLExtension() const {
    return CurrOpenCLExtension;
  }

  /// Check if a function declaration \p FD associates with any
  /// extensions present in OpenCLDeclExtMap and if so return the
  /// extension(s) name(s).
  std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);

  /// Check if a function type \p FT associates with any
  /// extensions present in OpenCLTypeExtMap and if so return the
  /// extension(s) name(s).
  std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);

  /// Find an extension in an appropriate extension map and return its name.
  template<typename T, typename MapT>
  std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);

  void setCurrentOpenCLExtension(llvm::StringRef Ext) {
    CurrOpenCLExtension = Ext;
  }

  /// Set OpenCL extensions for a type which can only be used when these
  /// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
  /// \param Exts A space separated list of OpenCL extensions.
  void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);

  /// Set OpenCL extensions for a declaration which can only be
  /// used when these OpenCL extensions are enabled. If \p Exts is empty, do
  /// nothing.
  /// \param Exts A space separated list of OpenCL extensions.
  void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);

  /// Set current OpenCL extensions for a type which can only be used
  /// when these OpenCL extensions are enabled. If current OpenCL extension is
  /// empty, do nothing.
  void setCurrentOpenCLExtensionForType(QualType T);

  /// Set current OpenCL extensions for a declaration which
  /// can only be used when these OpenCL extensions are enabled. If current
  /// OpenCL extension is empty, do nothing.
  void setCurrentOpenCLExtensionForDecl(Decl *FD);

  bool isOpenCLDisabledDecl(Decl *FD);
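  // Illustrative sketch: a type gated on an OpenCL extension is registered
  // once via the setters above and every later use is checked; the fp64
  // pairing below is only an example.
  //
  //   S.setOpenCLExtensionForType(Context.DoubleTy, "cl_khr_fp64");
  //   ...
  //   if (S.isOpenCLDisabledDecl(FD))
  //     ; // the declaration requires an extension that is not enabled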
  /// Check if type \p T corresponding to declaration specifier \p DS
  /// is disabled due to required OpenCL extensions being disabled. If so,
  /// emit diagnostics.
  /// \return true if type is disabled.
  bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);

  /// Check if declaration \p D used by expression \p E
  /// is disabled due to required OpenCL extensions being disabled. If so,
  /// emit diagnostics.
  /// \return true if type is disabled.
  bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);

  //===--------------------------------------------------------------------===//
  // OpenMP directives and clauses.
  //
private:
  void *VarDataSharingAttributesStack;
  /// Number of nested '#pragma omp declare target' directives.
  unsigned DeclareTargetNestingLevel = 0;
  /// Initialization of data-sharing attributes stack.
  void InitDataSharingAttributesStack();
  void DestroyDataSharingAttributesStack();
  ExprResult
  VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
                                        bool StrictlyPositive = true);

  /// Returns OpenMP nesting level for current directive.
  unsigned getOpenMPNestingLevel() const;

  /// Adjusts the function scopes index for the target-based regions.
  void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
                                    unsigned Level) const;

  /// Push new OpenMP function region for non-capturing function.
  void pushOpenMPFunctionRegion();

  /// Pop OpenMP function region for non-capturing function.
  void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);

  /// Check whether we're allowed to call Callee from the current function.
  void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee,
                                 bool CheckForDelayedContext = true);

  /// Check whether we're allowed to call Callee from the current function.
  void checkOpenMPHostFunction(SourceLocation Loc, FunctionDecl *Callee,
                               bool CheckCaller = true);

  /// Check if the expression is allowed to be used in expressions for the
  /// OpenMP devices.
  void checkOpenMPDeviceExpr(const Expr *E);

  /// Finishes analysis of the deferred function calls that may be declared
  /// as host/nohost during device/host compilation.
  void finalizeOpenMPDelayedAnalysis();

  /// Checks if a type or a declaration is disabled due to the owning
  /// extension being disabled, and emits diagnostic messages if it is
  /// disabled.
  /// \param D type or declaration to be checked.
  /// \param DiagLoc source location for the diagnostic message.
  /// \param DiagInfo information to be emitted for the diagnostic message.
  /// \param SrcRange source range of the declaration.
  /// \param Map maps type or declaration to the extensions.
  /// \param Selector selects diagnostic message: 0 for type and 1 for
  /// declaration.
  /// \return true if the type or declaration is disabled.
  template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
  bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
                                     MapT &Map, unsigned Selector = 0,
                                     SourceRange SrcRange = SourceRange());

public:
  /// Tries to capture a lambda's captured variables in the OpenMP region
  /// before the original lambda is captured.
  void tryCaptureOpenMPLambdas(ValueDecl *V);

  /// Return true if the provided declaration \a D should be captured by
  /// reference.
  /// \param Level Relative level of nested OpenMP construct for which the
  /// check is performed.
  /// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
  bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
                             unsigned OpenMPCaptureLevel) const;
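  // Illustrative sketch: when lowering a captured variable for a target
  // region, a caller can ask whether it is passed by reference (Level and
  // CapLevel are hypothetical nesting indices):
  //
  //   bool ByRef = S.isOpenMPCapturedByRef(VD, Level, CapLevel);
  //   QualType ParamTy =
  //       ByRef ? Context.getPointerType(VD->getType()) : VD->getType();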
  /// Check if the specified variable is used in one of the private
  /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
  /// constructs.
  VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
                                unsigned StopAt = 0);
  ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
                                   ExprObjectKind OK, SourceLocation Loc);

  /// If the current region is a loop-based region, mark the start of the
  /// loop construct.
  void startOpenMPLoop();

  /// Check if the specified variable is used in a 'private' clause.
  /// \param Level Relative level of nested OpenMP construct for which the
  /// check is performed.
  bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const;

  /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map
  /// etc.) for \p FD based on DSA for the provided corresponding captured
  /// declaration \p D.
  void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);

  /// Check if the specified variable is captured by the 'target' directive.
  /// \param Level Relative level of nested OpenMP construct for which the
  /// check is performed.
  bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const;

  ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
                                                    Expr *Op);

  /// Called on start of new data sharing attribute block.
  void StartOpenMPDSABlock(OpenMPDirectiveKind K,
                           const DeclarationNameInfo &DirName,
                           Scope *CurScope, SourceLocation Loc);
  /// Start analysis of clauses.
  void StartOpenMPClause(OpenMPClauseKind K);
  /// End analysis of clauses.
  void EndOpenMPClause();
  /// Called on end of data sharing attribute block.
  void EndOpenMPDSABlock(Stmt *CurDirective);

  /// Check if the current region is an OpenMP loop region and if it is,
  /// mark loop control variable, used in \p Init for loop initialization, as
  /// private by default.
  /// \param Init First part of the for loop.
  void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);

  // OpenMP directives and clauses.
  /// Called on correct id-expression from the '#pragma omp
  /// threadprivate'.
  ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
                                     const DeclarationNameInfo &Id,
                                     OpenMPDirectiveKind Kind);
  /// Called on well-formed '#pragma omp threadprivate'.
  DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(SourceLocation Loc,
                                                   ArrayRef<Expr *> VarList);
  /// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
  OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
                                                  ArrayRef<Expr *> VarList);
  /// Called on well-formed '#pragma omp allocate'.
  DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
                                              ArrayRef<Expr *> VarList,
                                              ArrayRef<OMPClause *> Clauses,
                                              DeclContext *Owner = nullptr);
  /// Called on well-formed '#pragma omp requires'.
  DeclGroupPtrTy
  ActOnOpenMPRequiresDirective(SourceLocation Loc,
                               ArrayRef<OMPClause *> ClauseList);
  /// Check restrictions on the Requires directive.
  OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
                                        ArrayRef<OMPClause *> Clauses);
  /// Check if the specified type is allowed to be used in 'omp declare
  /// reduction' construct.
  QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
                                           TypeResult ParsedType);
  /// Called on start of '#pragma omp declare reduction'.
  DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
      Scope *S, DeclContext *DC, DeclarationName Name,
      ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
      AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
  /// Initialize declare reduction construct combiner.
  void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
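  // Illustrative sketch: parsing '#pragma omp threadprivate(a, b)' first
  // resolves each id-expression and then forms the directive (the parser
  // locals here are hypothetical):
  //
  //   SmallVector<Expr *, 4> Vars;
  //   Vars.push_back(Actions.ActOnOpenMPIdExpression(
  //       CurScope, SS, IdInfo, OMPD_threadprivate).get());
  //   DeclGroupPtrTy DG =
  //       Actions.ActOnOpenMPThreadprivateDirective(Loc, Vars);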
  /// Finish current declare reduction construct combiner.
  void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
  /// Initialize declare reduction construct initializer.
  /// \return omp_priv variable.
  VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
  /// Finish current declare reduction construct initializer.
  void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
                                                 VarDecl *OmpPrivParm);
  /// Called at the end of '#pragma omp declare reduction'.
  DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
      Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);

  /// Check variable declaration in 'omp declare mapper' construct.
  TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
  /// Check if the specified type is allowed to be used in 'omp declare
  /// mapper' construct.
  QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
                                        TypeResult ParsedType);
  /// Called on start of '#pragma omp declare mapper'.
  OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
      Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
      SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
      Decl *PrevDeclInScope = nullptr);
  /// Build the mapper variable of '#pragma omp declare mapper'.
  void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
                                                Scope *S, QualType MapperType,
                                                SourceLocation StartLoc,
                                                DeclarationName VN);
  /// Called at the end of '#pragma omp declare mapper'.
  DeclGroupPtrTy
  ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
                                       ArrayRef<OMPClause *> ClauseList);

  /// Called on the start of target region i.e. '#pragma omp declare target'.
  bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
  /// Called at the end of target region i.e.
  /// '#pragma omp end declare target'.
  void ActOnFinishOpenMPDeclareTargetDirective();
  /// Searches for the provided declaration name for OpenMP declare target
  /// directive.
  NamedDecl *
  lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
                                const DeclarationNameInfo &Id,
                                NamedDeclSetType &SameDirectiveDecls);
  /// Called on correct id-expression from the '#pragma omp declare target'.
  void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
                                    OMPDeclareTargetDeclAttr::MapTypeTy MT,
                                    OMPDeclareTargetDeclAttr::DevTypeTy DT);
  /// Check declaration inside target region.
  void
  checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
                                   SourceLocation IdLoc = SourceLocation());
  /// Return true inside OpenMP declare target region.
  bool isInOpenMPDeclareTargetContext() const {
    return DeclareTargetNestingLevel > 0;
  }
  /// Return true inside OpenMP target region.
  bool isInOpenMPTargetExecutionDirective() const;

  /// Return the number of captured regions created for an OpenMP directive.
  static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);

  /// Initialization of captured region for OpenMP region.
  void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
  /// End of OpenMP region.
  ///
  /// \param S Statement associated with the current OpenMP region.
  /// \param Clauses List of clauses for the current OpenMP region.
  ///
  /// \returns Statement for finished OpenMP region.
  StmtResult ActOnOpenMPRegionEnd(StmtResult S,
                                  ArrayRef<OMPClause *> Clauses);
  StmtResult ActOnOpenMPExecutableDirective(
      OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
      OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
      Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
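  // Illustrative sketch: the parser brackets each directive's associated
  // statement with ActOnOpenMPRegionStart/ActOnOpenMPRegionEnd before the
  // directive-specific callback runs (error handling elided, locals
  // hypothetical):
  //
  //   Actions.ActOnOpenMPRegionStart(OMPD_parallel, CurScope);
  //   StmtResult Body = ParseStatement();
  //   StmtResult Region = Actions.ActOnOpenMPRegionEnd(Body, Clauses);
  //   StmtResult Dir = Actions.ActOnOpenMPParallelDirective(
  //       Clauses, Region.get(), StartLoc, EndLoc);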
  /// Called on well-formed '\#pragma omp parallel' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
                                          Stmt *AStmt,
                                          SourceLocation StartLoc,
                                          SourceLocation EndLoc);
  using VarsWithInheritedDSAType =
      llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
  /// Called on well-formed '\#pragma omp simd' after parsing
  /// of the associated statement.
  StmtResult
  ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
                           SourceLocation StartLoc, SourceLocation EndLoc,
                           VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp for' after parsing
  /// of the associated statement.
  StmtResult
  ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
                          SourceLocation StartLoc, SourceLocation EndLoc,
                          VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp for simd' after parsing
  /// of the associated statement.
  StmtResult
  ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
                              SourceLocation StartLoc, SourceLocation EndLoc,
                              VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp sections' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
                                          Stmt *AStmt,
                                          SourceLocation StartLoc,
                                          SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp section' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
                                         SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp single' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp master' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp critical' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
                                          ArrayRef<OMPClause *> Clauses,
                                          Stmt *AStmt,
                                          SourceLocation StartLoc,
                                          SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp parallel for' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPParallelForDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp parallel for simd' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPParallelForSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp parallel sections' after
  /// parsing of the associated statement.
  StmtResult
  ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
                                       Stmt *AStmt, SourceLocation StartLoc,
                                       SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp task' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
                                      Stmt *AStmt, SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp taskyield'.
  StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
                                           SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp barrier'.
  StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
                                         SourceLocation EndLoc);
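  // Illustrative sketch: loop directives additionally take a map that
  // collects variables whose data-sharing attributes are implicitly
  // inherited from the enclosing context (hypothetical locals):
  //
  //   VarsWithInheritedDSAType ImplicitDSA;
  //   StmtResult Dir = Actions.ActOnOpenMPForDirective(
  //       Clauses, Region.get(), StartLoc, EndLoc, ImplicitDSA);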
  /// Called on well-formed '\#pragma omp taskwait'.
  StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
                                          SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp taskgroup'.
  StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
                                           Stmt *AStmt,
                                           SourceLocation StartLoc,
                                           SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp flush'.
  StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
                                       SourceLocation StartLoc,
                                       SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp ordered' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
                                         Stmt *AStmt, SourceLocation StartLoc,
                                         SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp atomic' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp target' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp target data' after parsing of
  /// the associated statement.
  StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
                                            Stmt *AStmt,
                                            SourceLocation StartLoc,
                                            SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp target enter data' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
                                                 SourceLocation StartLoc,
                                                 SourceLocation EndLoc,
                                                 Stmt *AStmt);
  /// Called on well-formed '\#pragma omp target exit data' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
                                                SourceLocation StartLoc,
                                                SourceLocation EndLoc,
                                                Stmt *AStmt);
  /// Called on well-formed '\#pragma omp target parallel' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
                                                Stmt *AStmt,
                                                SourceLocation StartLoc,
                                                SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp target parallel for' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPTargetParallelForDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp teams' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
                                       Stmt *AStmt, SourceLocation StartLoc,
                                       SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp cancellation point'.
  StmtResult
  ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        OpenMPDirectiveKind CancelRegion);
  /// Called on well-formed '\#pragma omp cancel'.
  StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
                                        SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        OpenMPDirectiveKind CancelRegion);
  /// Called on well-formed '\#pragma omp taskloop' after parsing of the
  /// associated statement.
  StmtResult
  ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
                               SourceLocation StartLoc, SourceLocation EndLoc,
                               VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp taskloop simd' after parsing of
  /// the associated statement.
  StmtResult ActOnOpenMPTaskLoopSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp distribute' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPDistributeDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp target update'.
  StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
                                              SourceLocation StartLoc,
                                              SourceLocation EndLoc,
                                              Stmt *AStmt);
  /// Called on well-formed '\#pragma omp distribute parallel for' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPDistributeParallelForDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp distribute parallel for simd'
  /// after parsing of the associated statement.
  StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp distribute simd' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPDistributeSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp target parallel for simd' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPTargetParallelForSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp target simd' after parsing of
  /// the associated statement.
  StmtResult ActOnOpenMPTargetSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp teams distribute' after parsing of
  /// the associated statement.
  StmtResult ActOnOpenMPTeamsDistributeDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp teams distribute simd' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp teams distribute parallel for simd'
  /// after parsing of the associated statement.
  StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp teams distribute parallel for'
  /// after parsing of the associated statement.
  StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp target teams' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
                                             Stmt *AStmt,
                                             SourceLocation StartLoc,
                                             SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp target teams distribute' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp target teams distribute parallel
  /// for' after parsing of the associated statement.
  StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp target teams distribute parallel
  /// for simd' after parsing of the associated statement.
  StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp target teams distribute simd' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Checks correctness of linear modifiers.
  bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
                                 SourceLocation LinLoc);
  /// Checks that the specified declaration matches requirements for the
  /// linear decls.
  bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
                             OpenMPLinearClauseKind LinKind, QualType Type);

  /// Called on well-formed '\#pragma omp declare simd' after parsing of
  /// the associated method/function.
  DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
      DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
      Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
      ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
      ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);

  OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
                                         SourceLocation StartLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation EndLoc);
  /// Called on well-formed 'allocator' clause.
  OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
                                        SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);
  /// Called on well-formed 'if' clause.
  OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
                                 Expr *Condition, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation NameModifierLoc,
                                 SourceLocation ColonLoc,
                                 SourceLocation EndLoc);
  /// Called on well-formed 'final' clause.
  OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
  /// Called on well-formed 'num_threads' clause.
  OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
                                         SourceLocation StartLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation EndLoc);
  /// Called on well-formed 'safelen' clause.
  OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'simdlen' clause.
  OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'collapse' clause.
  OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
  /// Called on well-formed 'ordered' clause.
  OMPClause *
  ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
                           SourceLocation LParenLoc = SourceLocation(),
                           Expr *NumForLoops = nullptr);
  /// Called on well-formed 'grainsize' clause.
  OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);
  /// Called on well-formed 'num_tasks' clause.
  OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
  /// Called on well-formed 'hint' clause.
  OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);

  OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument,
                                     SourceLocation ArgumentLoc,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'default' clause.
  OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
                                      SourceLocation KindLoc,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'proc_bind' clause.
  OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
                                       SourceLocation KindLoc,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);

  OMPClause *ActOnOpenMPSingleExprWithArgClause(
      OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
      SourceLocation StartLoc, SourceLocation LParenLoc,
      ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
      SourceLocation EndLoc);
  /// Called on well-formed 'schedule' clause.
  OMPClause *ActOnOpenMPScheduleClause(
      OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
      OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
      SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
      SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);

  OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
                               SourceLocation EndLoc);
  /// Called on well-formed 'nowait' clause.
  OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'untied' clause.
  OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'mergeable' clause.
  OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
                                        SourceLocation EndLoc);
  /// Called on well-formed 'read' clause.
  OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
  /// Called on well-formed 'write' clause.
  OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
  /// Called on well-formed 'update' clause.
  OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'capture' clause.
  OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'seq_cst' clause.
  OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'threads' clause.
  OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'simd' clause.
  OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
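  // Illustrative sketch: argument-less clauses funnel through
  // ActOnOpenMPClause with just their kind, e.g. for 'nowait'
  // (hypothetical parser locals):
  //
  //   OMPClause *C = Actions.ActOnOpenMPClause(OMPC_nowait, StartLoc,
  //                                            EndLoc);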
  /// Called on well-formed 'nogroup' clause.
  OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'unified_address' clause.
  OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
                                             SourceLocation EndLoc);
  /// Called on well-formed 'unified_shared_memory' clause.
  OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
                                                  SourceLocation EndLoc);
  /// Called on well-formed 'reverse_offload' clause.
  OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
                                             SourceLocation EndLoc);
  /// Called on well-formed 'dynamic_allocators' clause.
  OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
                                                SourceLocation EndLoc);
  /// Called on well-formed 'atomic_default_mem_order' clause.
  OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
      OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
      SourceLocation StartLoc, SourceLocation LParenLoc,
      SourceLocation EndLoc);

  OMPClause *ActOnOpenMPVarListClause(
      OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
      const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
      CXXScopeSpec &ReductionOrMapperIdScopeSpec,
      DeclarationNameInfo &ReductionOrMapperId,
      OpenMPDependClauseKind DepKind, OpenMPLinearClauseKind LinKind,
      ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
      ArrayRef<SourceLocation> MapTypeModifiersLoc,
      OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
      SourceLocation DepLinMapLoc);
  /// Called on well-formed 'allocate' clause.
  OMPClause *
  ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
                            SourceLocation StartLoc, SourceLocation ColonLoc,
                            SourceLocation LParenLoc, SourceLocation EndLoc);
  /// Called on well-formed 'private' clause.
  OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'firstprivate' clause.
  OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
                                           SourceLocation StartLoc,
                                           SourceLocation LParenLoc,
                                           SourceLocation EndLoc);
  /// Called on well-formed 'lastprivate' clause.
  OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
                                          SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc);
  /// Called on well-formed 'shared' clause.
  OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'reduction' clause.
  OMPClause *ActOnOpenMPReductionClause(
      ArrayRef<Expr *> VarList, SourceLocation StartLoc,
      SourceLocation LParenLoc, SourceLocation ColonLoc,
      SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
      const DeclarationNameInfo &ReductionId,
      ArrayRef<Expr *> UnresolvedReductions = llvm::None);
  /// Called on well-formed 'task_reduction' clause.
  OMPClause *ActOnOpenMPTaskReductionClause(
      ArrayRef<Expr *> VarList, SourceLocation StartLoc,
      SourceLocation LParenLoc, SourceLocation ColonLoc,
      SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
      const DeclarationNameInfo &ReductionId,
      ArrayRef<Expr *> UnresolvedReductions = llvm::None);
  /// Called on well-formed 'in_reduction' clause.
  OMPClause *ActOnOpenMPInReductionClause(
      ArrayRef<Expr *> VarList, SourceLocation StartLoc,
      SourceLocation LParenLoc, SourceLocation ColonLoc,
      SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
      const DeclarationNameInfo &ReductionId,
      ArrayRef<Expr *> UnresolvedReductions = llvm::None);
  /// Called on well-formed 'linear' clause.
  OMPClause *
  ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
                          SourceLocation StartLoc, SourceLocation LParenLoc,
                          OpenMPLinearClauseKind LinKind,
                          SourceLocation LinLoc, SourceLocation ColonLoc,
                          SourceLocation EndLoc);
  /// Called on well-formed 'aligned' clause.
  OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
                                      Expr *Alignment,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation ColonLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'copyin' clause.
  OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'copyprivate' clause.
  OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
                                          SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc);
  /// Called on well-formed 'flush' pseudo clause.
  OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
  /// Called on well-formed 'depend' clause.
  OMPClause *
  ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind,
                          SourceLocation DepLoc, SourceLocation ColonLoc,
                          ArrayRef<Expr *> VarList, SourceLocation StartLoc,
                          SourceLocation LParenLoc, SourceLocation EndLoc);
  /// Called on well-formed 'device' clause.
  OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'map' clause.
  OMPClause *
  ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
                       ArrayRef<SourceLocation> MapTypeModifiersLoc,
                       CXXScopeSpec &MapperIdScopeSpec,
                       DeclarationNameInfo &MapperId,
                       OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
                       SourceLocation MapLoc, SourceLocation ColonLoc,
                       ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
                       ArrayRef<Expr *> UnresolvedMappers = llvm::None);
  /// Called on well-formed 'num_teams' clause.
  OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
  /// Called on well-formed 'thread_limit' clause.
  OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
                                          SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc);
  /// Called on well-formed 'priority' clause.
  OMPClause *ActOnOpenMPPriorityClause(Expr *Priority,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
  /// Called on well-formed 'dist_schedule' clause.
  OMPClause *ActOnOpenMPDistScheduleClause(
      OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
      SourceLocation StartLoc, SourceLocation LParenLoc,
      SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
  /// Called on well-formed 'defaultmap' clause.
  OMPClause *ActOnOpenMPDefaultmapClause(
      OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
      SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
      SourceLocation KindLoc, SourceLocation EndLoc);
  /// Called on well-formed 'to' clause.
  OMPClause *
  ActOnOpenMPToClause(ArrayRef<Expr *> VarList,
                      CXXScopeSpec &MapperIdScopeSpec,
                      DeclarationNameInfo &MapperId,
                      const OMPVarListLocTy &Locs,
                      ArrayRef<Expr *> UnresolvedMappers = llvm::None);
  /// Called on well-formed 'from' clause.
  OMPClause *ActOnOpenMPFromClause(
      ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
      DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
      ArrayRef<Expr *> UnresolvedMappers = llvm::None);
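  // Illustrative sketch: clauses carrying a variable list, e.g.
  // 'private(a, b)', are built from the collected expressions
  // (hypothetical parser locals):
  //
  //   OMPClause *C = Actions.ActOnOpenMPPrivateClause(Vars, StartLoc,
  //                                                   LParenLoc, EndLoc);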
  /// Called on well-formed 'use_device_ptr' clause.
  OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
                                           const OMPVarListLocTy &Locs);
  /// Called on well-formed 'is_device_ptr' clause.
  OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
                                          const OMPVarListLocTy &Locs);

  /// The kind of conversion being performed.
  enum CheckedConversionKind {
    /// An implicit conversion.
    CCK_ImplicitConversion,
    /// A C-style cast.
    CCK_CStyleCast,
    /// A functional-style cast.
    CCK_FunctionalCast,
    /// A cast other than a C-style cast.
    CCK_OtherCast,
    /// A conversion for an operand of a builtin overloaded operator.
    CCK_ForBuiltinOverloadedOp
  };

  static bool isCast(CheckedConversionKind CCK) {
    return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
           CCK == CCK_OtherCast;
  }

  /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
  /// cast. If there is already an implicit cast, merge into the existing one.
  /// If \p VK is VK_LValue, the result of the cast is an lvalue.
  ExprResult
  ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
                    ExprValueKind VK = VK_RValue,
                    const CXXCastPath *BasePath = nullptr,
                    CheckedConversionKind CCK = CCK_ImplicitConversion);

  /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
  /// to the conversion from scalar type ScalarTy to the Boolean type.
  static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);

  /// IgnoredValueConversions - Given that an expression's result is
  /// syntactically ignored, perform any conversions that are
  /// required.
  ExprResult IgnoredValueConversions(Expr *E);

  // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
  // functions and arrays to their respective pointers (C99 6.3.2.1).
  ExprResult UsualUnaryConversions(Expr *E);

  /// CallExprUnaryConversions - a special case of a unary conversion
  /// performed on a function designator of a call expression.
  ExprResult CallExprUnaryConversions(Expr *E);

  // DefaultFunctionArrayConversion - converts functions and arrays
  // to their respective pointers (C99 6.3.2.1).
  ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);

  // DefaultFunctionArrayLvalueConversion - converts functions and
  // arrays to their respective pointers and performs the
  // lvalue-to-rvalue conversion.
  ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
                                                  bool Diagnose = true);

  // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
  // the operand. This is DefaultFunctionArrayLvalueConversion,
  // except that it assumes the operand isn't of function or array
  // type.
  ExprResult DefaultLvalueConversion(Expr *E);

  // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
  // do not have a prototype. Integer promotions are performed on each
  // argument, and arguments that have type float are promoted to double.
  ExprResult DefaultArgumentPromotion(Expr *E);

  /// If \p E is a prvalue denoting an unmaterialized temporary, materialize
  /// it as an xvalue. In C++98, the result will still be a prvalue, because
  /// we don't have xvalues there.
  ExprResult TemporaryMaterializationConversion(Expr *E);

  // Used for emitting the right warning by DefaultVariadicArgumentPromotion
  enum VariadicCallType {
    VariadicFunction,
    VariadicBlock,
    VariadicMethod,
    VariadicConstructor,
    VariadicDoesNotApply
  };

  VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
                                       const FunctionProtoType *Proto,
                                       Expr *Fn);
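  // Illustrative sketch: a typical use of the conversion helpers above is to
  // materialize a known-needed promotion via ImpCastExprToType (E and
  // PromotedTy are hypothetical locals):
  //
  //   ExprResult Res = S.ImpCastExprToType(E, PromotedTy, CK_IntegralCast);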
  // Used for determining in which context a type is allowed to be passed to
  // a vararg function.
  enum VarArgKind {
    VAK_Valid,
    VAK_ValidInCXX11,
    VAK_Undefined,
    VAK_MSVCUndefined,
    VAK_Invalid
  };

  // Determines which VarArgKind fits an expression.
  VarArgKind isValidVarArgType(const QualType &Ty);

  /// Check to see if the given expression is a valid argument to a variadic
  /// function, issuing a diagnostic if not.
  void checkVariadicArgument(const Expr *E, VariadicCallType CT);

  /// Check to see if a given expression could have '.c_str()' called on it.
  bool hasCStrMethod(const Expr *E);

  /// GatherArgumentsForCall - Collects argument expressions for various
  /// forms of call prototypes.
  bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
                              const FunctionProtoType *Proto,
                              unsigned FirstParam, ArrayRef<Expr *> Args,
                              SmallVectorImpl<Expr *> &AllArgs,
                              VariadicCallType CallType = VariadicDoesNotApply,
                              bool AllowExplicit = false,
                              bool IsListInitialization = false);

  // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
  // will create a runtime trap if the resulting type is not a POD type.
  ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
                                              FunctionDecl *FDecl);

  // UsualArithmeticConversions - performs the UsualUnaryConversions on its
  // operands and then handles various conversions that are common to binary
  // operators (C99 6.3.1.8). If both operands aren't arithmetic, this
  // routine returns the first non-arithmetic type found. The client is
  // responsible for emitting appropriate error diagnostics.
  QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
                                      bool IsCompAssign = false);

  /// AssignConvertType - All of the 'assignment' semantic checks return this
  /// enum to indicate whether the assignment was allowed. These checks are
  /// done for simple assignments, as well as initialization, return from
  /// function, argument passing, etc. The query is phrased in terms of a
  /// source and destination type.
  enum AssignConvertType {
    /// Compatible - the types are compatible according to the standard.
    Compatible,

    /// PointerToInt - The assignment converts a pointer to an int, which we
    /// accept as an extension.
    PointerToInt,

    /// IntToPointer - The assignment converts an int to a pointer, which we
    /// accept as an extension.
    IntToPointer,

    /// FunctionVoidPointer - The assignment is between a function pointer
    /// and void*, which the standard doesn't allow, but we accept as an
    /// extension.
    FunctionVoidPointer,

    /// IncompatiblePointer - The assignment is between two pointer types
    /// that are not compatible, but we accept them as an extension.
    IncompatiblePointer,

    /// IncompatiblePointerSign - The assignment is between two pointer types
    /// which point to integers which have a different sign, but are
    /// otherwise identical. This is a subset of the above, but broken out
    /// because it's by far the most common case of incompatible pointers.
    IncompatiblePointerSign,

    /// CompatiblePointerDiscardsQualifiers - The assignment discards
    /// c/v/r qualifiers, which we accept as an extension.
    CompatiblePointerDiscardsQualifiers,

    /// IncompatiblePointerDiscardsQualifiers - The assignment
    /// discards qualifiers that we don't permit to be discarded,
    /// like address spaces.
    IncompatiblePointerDiscardsQualifiers,

    /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
    /// changes address spaces in nested pointer types which is not allowed.
    /// For instance, converting __private int ** to __generic int ** is
    /// illegal even though __private could be converted to __generic.
    IncompatibleNestedPointerAddressSpaceMismatch,

    /// IncompatibleNestedPointerQualifiers - The assignment is between two
    /// nested pointer types, and the qualifiers other than the first two
    /// levels differ, e.g. char ** -> const char **, but we accept them as
    /// an extension.
    IncompatibleNestedPointerQualifiers,

    /// IncompatibleVectors - The assignment is between two vector types that
    /// have the same size, which we accept as an extension.
    IncompatibleVectors,

    /// IntToBlockPointer - The assignment converts an int to a block
    /// pointer. We disallow this.
    IntToBlockPointer,

    /// IncompatibleBlockPointer - The assignment is between two block
    /// pointer types that are not compatible.
    IncompatibleBlockPointer,

    /// IncompatibleObjCQualifiedId - The assignment is between a qualified
    /// id type and something else (that is incompatible with it). For
    /// example, "id <XXX>" = "Foo *", where "Foo *" doesn't implement the
    /// XXX protocol.
    IncompatibleObjCQualifiedId,

    /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
    /// object with __weak qualifier.
    IncompatibleObjCWeakRef,

    /// Incompatible - We reject this conversion outright, it is invalid to
    /// represent it in the AST.
    Incompatible
  };

  /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
  /// assignment conversion type specified by ConvTy. This returns true if
  /// the conversion was invalid or false if the conversion was accepted.
  bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc,
                                QualType DstType, QualType SrcType,
                                Expr *SrcExpr, AssignmentAction Action,
                                bool *Complained = nullptr);

  /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
  /// enum. If AllowMask is true, then we also allow the complement of a
  /// valid value, to be used as a mask.
  bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
                         bool AllowMask) const;

  /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
  /// integer not in the range of enum values.
  void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
                              Expr *SrcExpr);

  /// CheckAssignmentConstraints - Perform type checking for assignment,
  /// argument passing, variable initialization, and function return values.
  /// C99 6.5.16.
  AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
                                               QualType LHSType,
                                               QualType RHSType);

  /// Check assignment constraints and optionally prepare for a conversion of
  /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
  /// is true.
  AssignConvertType CheckAssignmentConstraints(QualType LHSType,
                                               ExprResult &RHS,
                                               CastKind &Kind,
                                               bool ConvertRHS = true);
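  // Illustrative sketch: a caller classifies the assignment first and then
  // lets DiagnoseAssignmentResult emit any extension warning or error
  // (hypothetical locals):
  //
  //   Sema::AssignConvertType ConvTy =
  //       S.CheckAssignmentConstraints(Loc, LHSType, RHSType);
  //   if (S.DiagnoseAssignmentResult(ConvTy, Loc, LHSType, RHSType, RHSExpr,
  //                                  Sema::AA_Assigning))
  //     return ExprError(); // the conversion was invalid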
  /// Check assignment constraints for an assignment of RHS to LHSType.
  ///
  /// \param LHSType The destination type for the assignment.
  /// \param RHS The source expression for the assignment.
  /// \param Diagnose If \c true, diagnostics may be produced when checking
  ///        for assignability. If a diagnostic is produced, \p RHS will be
  ///        set to ExprError(). Note that this function may still return
  ///        without producing a diagnostic, even for an invalid assignment.
  /// \param DiagnoseCFAudited If \c true, the target is a function parameter
  ///        in an audited Core Foundation API and does not need to be
  ///        checked for ARC retain issues.
  /// \param ConvertRHS If \c true, \p RHS will be updated to model the
  ///        conversions necessary to perform the assignment. If \c false,
  ///        \p Diagnose must also be \c false.
  AssignConvertType
  CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &RHS,
                                   bool Diagnose = true,
                                   bool DiagnoseCFAudited = false,
                                   bool ConvertRHS = true);

  // If the lhs type is a transparent union, check whether we
  // can initialize the transparent union with the given expression.
  AssignConvertType
  CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS);

  bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);

  bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);

  ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                       AssignmentAction Action,
                                       bool AllowExplicit = false);
  ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                       AssignmentAction Action,
                                       bool AllowExplicit,
                                       ImplicitConversionSequence &ICS);
  ExprResult
  PerformImplicitConversion(Expr *From, QualType ToType,
                            const ImplicitConversionSequence &ICS,
                            AssignmentAction Action,
                            CheckedConversionKind CCK
                                = CCK_ImplicitConversion);
  ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                       const StandardConversionSequence &SCS,
                                       AssignmentAction Action,
                                       CheckedConversionKind CCK);
  ExprResult PerformQualificationConversion(
      Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
      CheckedConversionKind CCK = CCK_ImplicitConversion);

  /// The following "Check" methods will return a valid/converted QualType
  /// or a null QualType (indicating an error diagnostic was issued).

  /// Type checking binary operators (subroutines of CreateBuiltinBinOp).
  QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
                           ExprResult &RHS);
  QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
                                        ExprResult &RHS);
  QualType CheckPointerToMemberOperands( // C++ 5.5
      ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
      SourceLocation OpLoc, bool isIndirect);
  QualType CheckMultiplyDivideOperands( // C99 6.5.5
      ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
      bool IsDivide);
  QualType CheckRemainderOperands( // C99 6.5.5
      ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
      bool IsCompAssign = false);
  QualType CheckAdditionOperands( // C99 6.5.6
      ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
      BinaryOperatorKind Opc, QualType *CompLHSTy = nullptr);
  QualType CheckSubtractionOperands( // C99 6.5.6
      ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
      QualType *CompLHSTy = nullptr);
  QualType CheckShiftOperands( // C99 6.5.7
      ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
      BinaryOperatorKind Opc, bool IsCompAssign = false);
  void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
  QualType CheckCompareOperands( // C99 6.5.8/9
      ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
      BinaryOperatorKind Opc);
  QualType CheckBitwiseOperands( // C99 6.5.[10...12]
      ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
      BinaryOperatorKind Opc);
  QualType CheckLogicalOperands( // C99 6.5.[13,14]
      ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
      BinaryOperatorKind Opc);
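  // Illustrative sketch: CreateBuiltinBinOp dispatches on the opcode to one
  // of the Check*Operands helpers; for '+' the flow is roughly as below
  // (hypothetical locals):
  //
  //   QualType CompLHSTy; // only populated for compound assignment
  //   QualType ResultTy =
  //       S.CheckAdditionOperands(LHS, RHS, OpLoc, BO_Add, &CompLHSTy);
  //   if (ResultTy.isNull())
  //     return ExprError();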
  // CheckAssignmentOperands is used for both simple and compound assignment.
  // For simple assignment, pass both expressions and a null converted type.
  // For compound assignment, pass both expressions and the converted type.
  QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
      Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc,
      QualType CompoundType);

  ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
                                     UnaryOperatorKind Opcode, Expr *Op);
  ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
                                         BinaryOperatorKind Opcode,
                                         Expr *LHS, Expr *RHS);
  ExprResult checkPseudoObjectRValue(Expr *E);
  Expr *recreateSyntacticForm(PseudoObjectExpr *E);

  QualType CheckConditionalOperands( // C99 6.5.15
      ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
      ExprObjectKind &OK, SourceLocation QuestionLoc);
  QualType CXXCheckConditionalOperands( // C++ 5.16
      ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK,
      ExprObjectKind &OK, SourceLocation questionLoc);

  QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
                                    bool ConvertArgs = true);
  QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1,
                                    ExprResult &E2, bool ConvertArgs = true) {
    Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
    QualType Composite =
        FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
    E1 = E1Tmp;
    E2 = E2Tmp;
    return Composite;
  }

  QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
                                        SourceLocation QuestionLoc);

  bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
                                  SourceLocation QuestionLoc);

  void DiagnoseAlwaysNonNullPointer(Expr *E,
                                    Expr::NullPointerConstantKind NullType,
                                    bool IsEqual, SourceRange Range);

  /// Type checking for vector binary operators.
  QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
                               SourceLocation Loc, bool IsCompAssign,
                               bool AllowBothBool, bool AllowBoolConversion);
  QualType GetSignedVectorType(QualType V);
  QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
                                      SourceLocation Loc,
                                      BinaryOperatorKind Opc);
  QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
                                      SourceLocation Loc);

  bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
  bool isLaxVectorConversion(QualType srcType, QualType destType);

  /// Type checking declaration initializers (C99 6.7.8).
  bool CheckForConstantInitializer(Expr *e, QualType t);

  // Type checking C++ declaration initializers (C++ [dcl.init]).

  /// ReferenceCompareResult - Expresses the result of comparing two
  /// types (cv1 T1 and cv2 T2) to determine their compatibility for the
  /// purposes of initialization by reference (C++ [dcl.init.ref]p4).
  enum ReferenceCompareResult {
    /// Ref_Incompatible - The two types are incompatible, so direct
    /// reference binding is not possible.
    Ref_Incompatible = 0,
    /// Ref_Related - The two types are reference-related, which means
    /// that their unqualified forms (T1 and T2) are either the same
    /// or T1 is a base class of T2.
    Ref_Related,
    /// Ref_Compatible - The two types are reference-compatible.
    Ref_Compatible
  };

  ReferenceCompareResult
  CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
                               bool &DerivedToBase, bool &ObjCConversion,
                               bool &ObjCLifetimeConversion);

  ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
                                 Expr *CastExpr, CastKind &CastKind,
                                 ExprValueKind &VK, CXXCastPath &Path);

  /// Force an expression with unknown-type to an expression of the
  /// given type.
  ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);

  /// Type-check an expression that's being passed to an
  /// __unknown_anytype parameter.
  ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result,
                                QualType &paramType);

  // CheckVectorCast - check type constraints for vectors.
  // Since vectors are an extension, there is no C standard reference for this.
  // We allow casting between vectors and integer datatypes of the same size.
  // returns true if the cast is invalid
  bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
                       CastKind &Kind);

  /// Prepare `SplattedExpr` for a vector splat operation, adding
  /// implicit casts if necessary.
  ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);

  // CheckExtVectorCast - check type constraints for extended vectors.
  // Since vectors are an extension, there is no C standard reference for this.
  // We allow casting between vectors and integer datatypes of the same size,
  // or vectors and the element type of that vector.
  // returns the cast expr
  ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
                                CastKind &Kind);

  ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
                                        SourceLocation LParenLoc,
                                        Expr *CastExpr,
                                        SourceLocation RParenLoc);

  enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };

  /// Checks for invalid conversions and casts between
  /// retainable pointers and other pointer kinds for ARC and Weak.
  ARCConversionResult CheckObjCConversion(SourceRange castRange,
                                          QualType castType, Expr *&op,
                                          CheckedConversionKind CCK,
                                          bool Diagnose = true,
                                          bool DiagnoseCFAudited = false,
                                          BinaryOperatorKind Opc = BO_PtrMemD);

  Expr *stripARCUnbridgedCast(Expr *e);
  void diagnoseARCUnbridgedCast(Expr *e);

  bool CheckObjCARCUnavailableWeakConversion(QualType castType,
                                             QualType ExprType);

  /// checkRetainCycles - Check whether an Objective-C message send
  /// might create an obvious retain cycle.
  void checkRetainCycles(ObjCMessageExpr *msg);
  void checkRetainCycles(Expr *receiver, Expr *argument);
  void checkRetainCycles(VarDecl *Var, Expr *Init);

  /// checkUnsafeAssigns - Check whether +1 expr is being assigned
  /// to weak/__unsafe_unretained type.
  bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);

  /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
  /// to weak/__unsafe_unretained expression.
  void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);

  /// CheckMessageArgumentTypes - Check types in an Obj-C message send.
  /// \param Method - May be null.
  /// \param [out] ReturnType - The return type of the send.
  /// \return true iff there were any incompatible types.
  bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
                                 MultiExprArg Args, Selector Sel,
                                 ArrayRef<SourceLocation> SelectorLocs,
                                 ObjCMethodDecl *Method, bool isClassMessage,
                                 bool isSuperMessage, SourceLocation lbrac,
                                 SourceLocation rbrac, SourceRange RecRange,
                                 QualType &ReturnType, ExprValueKind &VK);

  /// Determine the result of a message send expression based on
  /// the type of the receiver, the method expected to receive the message,
  /// and the form of the message send.
  QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
                                    ObjCMethodDecl *Method, bool isClassMessage,
                                    bool isSuperMessage);

  /// If the given expression involves a message send to a method
  /// with a related result type, emit a note describing what happened.
  void EmitRelatedResultTypeNote(const Expr *E);

  /// Given that we had incompatible pointer types in a return
  /// statement, check whether we're in a method with a related result
  /// type, and if so, emit a note describing what happened.
  void EmitRelatedResultTypeNoteForReturn(QualType destType);

  class ConditionResult {
    Decl *ConditionVar;
    FullExprArg Condition;
    bool Invalid;
    bool HasKnownValue;
    bool KnownValue;

    friend class Sema;
    ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
                    bool IsConstexpr)
        : ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
          HasKnownValue(IsConstexpr && Condition.get() &&
                        !Condition.get()->isValueDependent()),
          KnownValue(HasKnownValue &&
                     !!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
    explicit ConditionResult(bool Invalid)
        : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
          HasKnownValue(false), KnownValue(false) {}

  public:
    ConditionResult() : ConditionResult(false) {}
    bool isInvalid() const { return Invalid; }
    std::pair<VarDecl *, Expr *> get() const {
      return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
                            Condition.get());
    }
    llvm::Optional<bool> getKnownValue() const {
      if (!HasKnownValue)
        return None;
      return KnownValue;
    }
  };
  static ConditionResult ConditionError() { return ConditionResult(true); }

  enum class ConditionKind {
    Boolean,     ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
    ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
    Switch       ///< An integral condition for a 'switch' statement.
  };

  ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
                                 Expr *SubExpr, ConditionKind CK);

  ConditionResult ActOnConditionVariable(Decl *ConditionVar,
                                         SourceLocation StmtLoc,
                                         ConditionKind CK);

  DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);

  ExprResult CheckConditionVariable(VarDecl *ConditionVar,
                                    SourceLocation StmtLoc,
                                    ConditionKind CK);
  ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);

  /// CheckBooleanCondition - Diagnose problems involving the use of
  /// the given expression as a boolean condition (e.g. in an if
  /// statement). Also performs the standard function and array
  /// decays, possibly changing the input variable.
  ///
  /// \param Loc - A location associated with the condition, e.g. the
  /// 'if' keyword.
  /// \return true iff there were any errors
  ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
                                   bool IsConstexpr = false);

  /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
  /// found in an explicit(bool) specifier.
  ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);

  /// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
  /// Returns true if the explicit specifier is now resolved.
  bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);

  /// DiagnoseAssignmentAsCondition - Given that an expression is
  /// being used as a boolean condition, warn if it's an assignment.
  void DiagnoseAssignmentAsCondition(Expr *E);

  /// Redundant parentheses over an equality comparison can indicate
  /// that the user intended an assignment used as condition.
  void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);

  /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
  ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);

  /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
  /// the specified width and sign. If an overflow occurs, detect it and emit
  /// the specified diagnostic.
  void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
                                          unsigned NewWidth, bool NewSign,
                                          SourceLocation Loc, unsigned DiagID);

  /// Checks that the Objective-C declaration is declared in the global scope.
  /// Emits an error and marks the declaration as invalid if it's not declared
  /// in the global scope.
  bool CheckObjCDeclScope(Decl *D);

  /// Abstract base class used for diagnosing integer constant
  /// expression violations.
  class VerifyICEDiagnoser {
  public:
    bool Suppress;

    VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }

    virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) = 0;
    virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
    virtual ~VerifyICEDiagnoser() { }
  };

  /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
  /// and reports the appropriate diagnostics. Returns false on success.
  /// Can optionally return the value of the expression.
  ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                             VerifyICEDiagnoser &Diagnoser,
                                             bool AllowFold = true);
  ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                             unsigned DiagID,
                                             bool AllowFold = true);
  ExprResult VerifyIntegerConstantExpression(Expr *E,
                                             llvm::APSInt *Result = nullptr);

  /// VerifyBitField - verifies that a bit field expression is an ICE and has
  /// the correct width, and that the field type is valid.
  /// Returns false on success.
  /// Can optionally return whether the bit-field is of width 0
  ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
                            QualType FieldTy, bool IsMsStruct, Expr *BitWidth,
                            bool *ZeroWidth = nullptr);

private:
  unsigned ForceCUDAHostDeviceDepth = 0;

public:
  /// Increments our count of the number of times we've seen a pragma forcing
  /// functions to be __host__ __device__. So long as this count is greater
  /// than zero, all functions encountered will be __host__ __device__.
  void PushForceCUDAHostDevice();

  /// Decrements our count of the number of times we've seen a pragma forcing
  /// functions to be __host__ __device__. Returns false if the count is 0
  /// before decrementing, so you can emit an error.
  bool PopForceCUDAHostDevice();

  /// Diagnostics that are emitted only if we discover that the given function
  /// must be codegen'ed. Because handling these correctly adds overhead to
  /// compilation, this is currently only enabled for CUDA compilations.
  llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
                 std::vector<PartialDiagnosticAt>>
      DeviceDeferredDiags;

  /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
  /// key in a hashtable, both the FD and location are hashed.
  struct FunctionDeclAndLoc {
    CanonicalDeclPtr<FunctionDecl> FD;
    SourceLocation Loc;
  };

  /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
  /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
  /// same deferred diag twice.
  llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;

  /// An inverse call graph, mapping known-emitted functions to one of their
  /// known-emitted callers (plus the location of the call).
  ///
  /// Functions that we can tell a priori must be emitted aren't added to this
  /// map.
  llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
                 /* Caller = */ FunctionDeclAndLoc>
      DeviceKnownEmittedFns;

  /// A partial call graph maintained during CUDA/OpenMP device code compilation
  /// to support deferred diagnostics.
  ///
  /// Functions are only added here if, at the time they're considered, they are
  /// not known-emitted. As soon as we discover that a function is
  /// known-emitted, we remove it and everything it transitively calls from this
  /// set and add those functions to DeviceKnownEmittedFns.
  llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
                 /* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
                                                 SourceLocation>>
      DeviceCallGraph;

  /// Diagnostic builder for CUDA/OpenMP device errors which may or may not be
  /// deferred.
  ///
  /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
  /// which are not allowed to appear inside __device__ functions and are
  /// allowed to appear in __host__ __device__ functions only if the host+device
  /// function is never codegen'ed.
  ///
  /// To handle this, we use the notion of "deferred diagnostics", where we
  /// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
  ///
  /// This class lets you emit either a regular diagnostic, a deferred
  /// diagnostic, or no diagnostic at all, according to an argument you pass to
  /// its constructor, thus simplifying the process of creating these "maybe
  /// deferred" diagnostics.
  class DeviceDiagBuilder {
  public:
    enum Kind {
      /// Emit no diagnostics.
      K_Nop,
      /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
      K_Immediate,
      /// Emit the diagnostic immediately, and, if it's a warning or error, also
      /// emit a call stack showing how this function can be reached by an a
      /// priori known-emitted function.
      K_ImmediateWithCallStack,
      /// Create a deferred diagnostic, which is emitted only if the function
      /// it's attached to is codegen'ed. Also emit a call stack as with
      /// K_ImmediateWithCallStack.
      K_Deferred
    };

    DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
                      FunctionDecl *Fn, Sema &S);
    DeviceDiagBuilder(DeviceDiagBuilder &&D);
    DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
    ~DeviceDiagBuilder();

    /// Convertible to bool: True if we immediately emitted an error, false if
    /// we didn't emit an error or we created a deferred error.
    ///
    /// Example usage:
    ///
    ///   if (DeviceDiagBuilder(...) << foo << bar)
    ///     return ExprError();
    ///
    /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
    /// want to use these instead of creating a DeviceDiagBuilder yourself.
    operator bool() const { return ImmediateDiag.hasValue(); }

    template <typename T>
    friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
                                               const T &Value) {
      if (Diag.ImmediateDiag.hasValue())
        *Diag.ImmediateDiag << Value;
      else if (Diag.PartialDiagId.hasValue())
        Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
            << Value;
      return Diag;
    }

  private:
    Sema &S;
    SourceLocation Loc;
    unsigned DiagID;
    FunctionDecl *Fn;
    bool ShowCallStack;

    // Invariant: At most one of these Optionals has a value.
    // FIXME: Switch these to a Variant once that exists.
    llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
    llvm::Optional<unsigned> PartialDiagId;
  };

  /// Indicate that this function (and thus everything it transitively calls)
  /// will be codegen'ed, and emit any deferred diagnostics on this function and
  /// its (transitive) callees.
  void markKnownEmitted(
      Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
      SourceLocation OrigLoc,
      const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);

  /// Creates a DeviceDiagBuilder that emits the diagnostic if the current
  /// context is "used as device code".
  ///
  /// - If CurContext is a __host__ function, does not emit any diagnostics.
  /// - If CurContext is a __device__ or __global__ function, emits the
  ///   diagnostics immediately.
  /// - If CurContext is a __host__ __device__ function and we are compiling for
  ///   the device, creates a diagnostic which is emitted if and when we realize
  ///   that the function will be codegen'ed.
  ///
  /// Example usage:
  ///
  ///  // Variable-length arrays are not allowed in CUDA device code.
  ///  if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
  ///    return ExprError();
  ///  // Otherwise, continue parsing as normal.
  DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);

  /// Creates a DeviceDiagBuilder that emits the diagnostic if the current
  /// context is "used as host code".
  ///
  /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
  DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);

  /// Creates a DeviceDiagBuilder that emits the diagnostic if the current
  /// context is "used as device code".
  ///
  /// - If CurContext is a `declare target` function or it is known that the
  ///   function is emitted for the device, emits the diagnostics immediately.
  /// - If CurContext is a non-`declare target` function and we are compiling
  ///   for the device, creates a diagnostic which is emitted if and when we
  ///   realize that the function will be codegen'ed.
  ///
  /// Example usage:
  ///
  ///  // Variable-length arrays are not allowed in NVPTX device code.
  ///  if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
  ///    return ExprError();
  ///  // Otherwise, continue parsing as normal.
  DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);

  /// Creates a DeviceDiagBuilder that emits the diagnostic if the current
  /// context is "used as host code".
  ///
  /// - If CurContext is a `declare target` function or it is known that the
  ///   function is emitted for the host, emits the diagnostics immediately.
  /// - If CurContext is a non-host function, just ignore it.
  ///
  /// Example usage:
  ///
  ///  // Variable-length arrays are not allowed in NVPTX device code.
  ///  if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
  ///    return ExprError();
  ///  // Otherwise, continue parsing as normal.
  DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID);

  DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);

  enum CUDAFunctionTarget {
    CFT_Device,
    CFT_Global,
    CFT_Host,
    CFT_HostDevice,
    CFT_InvalidTarget
  };

  /// Determines whether the given function is a CUDA device/host/kernel/etc.
  /// function.
  ///
  /// Use this rather than examining the function's attributes yourself -- you
  /// will get it wrong. Returns CFT_Host if D is null.
  CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
                                        bool IgnoreImplicitHDAttr = false);
  CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);

  /// Gets the CUDA target for the current context.
  CUDAFunctionTarget CurrentCUDATarget() {
    return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
  }

  // CUDA function call preference. Must be ordered numerically from
  // worst to best.
  enum CUDAFunctionPreference {
    CFP_Never,      // Invalid caller/callee combination.
    CFP_WrongSide,  // Calls from host-device to host or device
                    // function that do not match current compilation
                    // mode.
    CFP_HostDevice, // Any calls to host/device functions.
    CFP_SameSide,   // Calls from host-device to host or device
                    // function matching current compilation mode.
    CFP_Native,     // host-to-host or device-to-device calls.
  };

  /// Identifies relative preference of a given Caller/Callee
  /// combination, based on their host/device attributes.
  /// \param Caller function which needs address of \p Callee.
  ///               nullptr in case of global context.
  /// \param Callee target function
  ///
  /// \returns preference value for particular Caller/Callee combination.
  CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
                                                const FunctionDecl *Callee);

  /// Determines whether Caller may invoke Callee, based on their CUDA
  /// host/device attributes. Returns false if the call is not allowed.
  ///
  /// Note: Will return true for CFP_WrongSide calls. These may appear in
  /// semantically correct CUDA programs, but only if they're never codegen'ed.
  bool IsAllowedCUDACall(const FunctionDecl *Caller,
                         const FunctionDecl *Callee) {
    return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
  }

  /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
  /// depending on FD and the current compilation settings.
  void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
                                   const LookupResult &Previous);

public:
  /// Check whether we're allowed to call Callee from the current context.
  ///
  /// - If the call is never allowed in a semantically-correct program
  ///   (CFP_Never), emits an error and returns false.
  ///
  /// - If the call is allowed in semantically-correct programs, but only if
  ///   it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
  ///   be emitted if and when the caller is codegen'ed, and returns true.
  ///
  ///   Will only create deferred diagnostics for a given SourceLocation once,
  ///   so you can safely call this multiple times without generating duplicate
  ///   deferred errors.
  ///
  /// - Otherwise, returns true without emitting any diagnostics.
  bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);

  /// Set __device__ or __host__ __device__ attributes on the given lambda
  /// operator() method.
  ///
  /// CUDA lambdas declared inside __device__ or __global__ functions inherit
  /// the __device__ attribute. Similarly, lambdas inside __host__ __device__
  /// functions become __host__ __device__ themselves.
  void CUDASetLambdaAttrs(CXXMethodDecl *Method);

  /// Finds a function in \p Matches with highest calling priority
  /// from \p Caller context and erases all functions with lower
  /// calling priority.
  void EraseUnwantedCUDAMatches(
      const FunctionDecl *Caller,
      SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);

  /// Given an implicit special member, infer its CUDA target from the
  /// calls it needs to make to underlying base/field special members.
  /// \param ClassDecl the class for which the member is being created.
  /// \param CSM the kind of special member.
  /// \param MemberDecl the special member itself.
  /// \param ConstRHS true if this is a copy operation with a const object on
  ///        its RHS.
  /// \param Diagnose true if this call should emit diagnostics.
  /// \return true if there was an error inferring.
  /// The result of this call is implicit CUDA target attribute(s) attached to
  /// the member declaration.
  bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
                                               CXXSpecialMember CSM,
                                               CXXMethodDecl *MemberDecl,
                                               bool ConstRHS,
                                               bool Diagnose);

  /// \return true if \p CD can be considered empty according to CUDA
  /// (E.2.3.1 in CUDA 7.5 Programming guide).
  bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
  bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);

  // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
  // case of error emits appropriate diagnostic and invalidates \p Var.
// // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. 
PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); /// Reports signatures for a call to CodeCompleteConsumer and returns the /// preferred type for the current argument. Returned type can be null. QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteAfterIf(Scope *S); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, 
SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. 
struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); public: // Used by C++ template instantiation. 
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. 
  void CheckShadowInheritedFields(const SourceLocation &Loc,
                                  DeclarationName FieldName,
                                  const CXXRecordDecl *RD,
                                  bool DeclIsField = true);

  /// Check if the given expression contains 'break' or 'continue'
  /// statement that produces control flow different from GCC.
  void CheckBreakContinueBinding(Expr *E);

  /// Check whether receiver is mutable ObjC container which
  /// attempts to add itself into the container
  void CheckObjCCircularContainer(ObjCMessageExpr *Message);

  void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
  void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
                                 bool DeleteWasArrayForm);

public:
  /// Register a magic integral constant to be used as a type tag.
  void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
                                  uint64_t MagicValue, QualType Type,
                                  bool LayoutCompatible, bool MustBeNull);

  struct TypeTagData {
    TypeTagData() {}

    TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull)
        : Type(Type), LayoutCompatible(LayoutCompatible),
          MustBeNull(MustBeNull) {}

    QualType Type;

    /// If true, \c Type should be compared with other expression's types for
    /// layout-compatibility.
    unsigned LayoutCompatible : 1;
    unsigned MustBeNull : 1;
  };

  /// A pair of ArgumentKind identifier and magic value. This uniquely
  /// identifies the magic value.
  typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;

private:
  /// A map from magic value to type information.
  std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
      TypeTagForDatatypeMagicValues;

  /// Perform checks on a call of a function with argument_with_type_tag
  /// or pointer_with_type_tag attributes.
  void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                                const ArrayRef<const Expr *> ExprArgs,
                                SourceLocation CallSiteLoc);

  /// Check if we are taking the address of a packed field
  /// as this may be a problem if the pointer value is dereferenced.
  void CheckAddressOfPackedMember(Expr *rhs);

  /// The parser's current scope.
  ///
  /// The parser maintains this state here.
  Scope *CurScope;

  mutable IdentifierInfo *Ident_super;
  mutable IdentifierInfo *Ident___float128;

  /// Nullability type specifiers.
  IdentifierInfo *Ident__Nonnull = nullptr;
  IdentifierInfo *Ident__Nullable = nullptr;
  IdentifierInfo *Ident__Null_unspecified = nullptr;

  IdentifierInfo *Ident_NSError = nullptr;

  /// The handler for the FileChanged preprocessor events.
  ///
  /// Used for diagnostics that implement custom semantic analysis for #include
  /// directives, like -Wpragma-pack.
  sema::SemaPPCallbacks *SemaPPCallbackHandler;

protected:
  friend class Parser;
  friend class InitializationSequence;
  friend class ASTReader;
  friend class ASTDeclReader;
  friend class ASTWriter;

public:
  /// Retrieve the keyword associated with the given nullability kind.
  IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);

  /// The struct behind the CFErrorRef pointer.
  RecordDecl *CFError = nullptr;

  /// Retrieve the identifier "NSError".
  IdentifierInfo *getNSErrorIdent();

  /// Retrieve the parser's current scope.
  ///
  /// This routine must only be used when it is certain that semantic analysis
  /// and the parser are in precisely the same context, which is not the case
  /// when, e.g., we are performing any kind of template instantiation.
  /// Therefore, the only safe places to use this scope are in the parser
  /// itself and in routines directly invoked from the parser and *never* from
  /// template substitution or instantiation.
  Scope *getCurScope() const { return CurScope; }

  void incrementMSManglingNumber() const {
    return CurScope->incrementMSManglingNumber();
  }

  IdentifierInfo *getSuperIdentifier() const;
  IdentifierInfo *getFloat128Identifier() const;

  Decl *getObjCDeclContext() const;

  DeclContext *getCurLexicalContext() const {
    return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
  }

  const DeclContext *getCurObjCLexicalContext() const {
    const DeclContext *DC = getCurLexicalContext();
    // A category implicitly has the attribute of the interface.
    if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
      DC = CatD->getClassInterface();
    return DC;
  }

  /// To be used for checking whether the arguments being passed to a
  /// function exceed the number of parameters expected for it.
  static bool TooManyArguments(size_t NumParams, size_t NumArgs,
                               bool PartialOverloading = false) {
    // We check whether we're just after a comma in code-completion.
    if (NumArgs > 0 && PartialOverloading)
      return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
    return NumArgs > NumParams;
  }

  // Emitting members of dllexported classes is delayed until the class
  // (including field initializers) is fully parsed.
  SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
  SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;

private:
  class SavePendingParsedClassStateRAII {
  public:
    SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }

    ~SavePendingParsedClassStateRAII() {
      assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
             "there shouldn't be any pending delayed exception spec checks");
      assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
             "there shouldn't be any pending delayed exception spec checks");
      assert(S.DelayedDllExportClasses.empty() &&
             "there shouldn't be any pending delayed DLL export classes");
      swapSavedState();
    }

  private:
    Sema &S;
    decltype(DelayedOverridingExceptionSpecChecks)
        SavedOverridingExceptionSpecChecks;
    decltype(DelayedEquivalentExceptionSpecChecks)
        SavedEquivalentExceptionSpecChecks;
    decltype(DelayedDllExportClasses) SavedDllExportClasses;

    void swapSavedState() {
      SavedOverridingExceptionSpecChecks.swap(
          S.DelayedOverridingExceptionSpecChecks);
      SavedEquivalentExceptionSpecChecks.swap(
          S.DelayedEquivalentExceptionSpecChecks);
      SavedDllExportClasses.swap(S.DelayedDllExportClasses);
    }
  };

  /// Helper class that collects misaligned member designations and
  /// their location info for delayed diagnostics.
  struct MisalignedMember {
    Expr *E;
    RecordDecl *RD;
    ValueDecl *MD;
    CharUnits Alignment;

    MisalignedMember() : E(), RD(), MD(), Alignment() {}
    MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
                     CharUnits Alignment)
        : E(E), RD(RD), MD(MD), Alignment(Alignment) {}
    explicit MisalignedMember(Expr *E)
        : MisalignedMember(E, nullptr, nullptr, CharUnits()) {}

    bool operator==(const MisalignedMember &m) { return this->E == m.E; }
  };

  /// Small set of gathered accesses to potentially misaligned members
  /// due to the packed attribute.
  SmallVector<MisalignedMember, 4> MisalignedMembers;

  /// Adds an expression to the set of gathered misaligned members.
  void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
                                     CharUnits Alignment);

public:
  /// Diagnoses the current set of gathered accesses. This typically
  /// happens at full expression level. The set is cleared after emitting the
  /// diagnostics.
  void DiagnoseMisalignedMembers();

  /// This function checks if the expression is in the set of potentially
  /// misaligned members and it is converted to some pointer type T with lower
  /// or equal alignment requirements. If so, it removes it. This is used when
  /// we do not want to diagnose such misaligned access (e.g. in conversions to
  /// void*).
  void DiscardMisalignedMemberAddress(const Type *T, Expr *E);

  /// This function calls Action when it determines that E designates a
  /// misaligned member due to the packed attribute. This is used to emit
  /// local diagnostics like in reference binding.
  void RefersToMemberWithReducedAlignment(
      Expr *E,
      llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
          Action);

  /// Describes the reason a calling convention specification was ignored, used
  /// for diagnostics.
  enum class CallingConventionIgnoredReason {
    ForThisTarget = 0,
    VariadicFunction,
    ConstructorDestructor,
    BuiltinFunction
  };
};

/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
  Sema &Actions;
  bool Entered = true;

public:
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Decl *LambdaContextDecl = nullptr,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other,
      bool ShouldEnter = true)
      : Actions(Actions), Entered(ShouldEnter) {
    if (Entered)
      Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                              ExprContext);
  }
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Sema::ReuseLambdaContextDecl_t,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(
        NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
  }

  enum InitListTag { InitList };
  EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
                                   bool ShouldEnter = true)
      : Actions(Actions), Entered(false) {
    // In C++11 onwards, narrowing checks are performed on the contents of
    // braced-init-lists, even when they occur within unevaluated operands.
    // Therefore we still need to instantiate constexpr functions used in such
    // a context.
    if (ShouldEnter && Actions.isUnevaluatedContext() &&
        Actions.getLangOpts().CPlusPlus11) {
      Actions.PushExpressionEvaluationContext(
          Sema::ExpressionEvaluationContext::UnevaluatedList);
      Entered = true;
    }
  }

  ~EnterExpressionEvaluationContext() {
    if (Entered)
      Actions.PopExpressionEvaluationContext();
  }
};

DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
                         sema::TemplateDeductionInfo &Info);

/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
  CachedTokens Toks;
  /// The template function declaration to be late parsed.
  Decl *D;
};

} // end namespace clang

namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
  using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
  using FDBaseInfo =
      DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;

  static FunctionDeclAndLoc getEmptyKey() {
    return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
  }

  static FunctionDeclAndLoc getTombstoneKey() {
    return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
  }

  static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
    return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
                        FDL.Loc.getRawEncoding());
  }

  static bool isEqual(const FunctionDeclAndLoc &LHS,
                      const FunctionDeclAndLoc &RHS) {
    return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
  }
};
} // namespace llvm

#endif
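The deferred-diagnostic machinery declared above (DeviceDiagBuilder, DeviceDeferredDiags, markKnownEmitted) is easiest to see in miniature. The following standalone C sketch is not Clang code; all names in it (fn_record, diag_defer, fn_mark_emitted) are hypothetical, and it only illustrates the core idea: a diagnostic is either printed immediately or parked on a function record and flushed only if that function is later discovered to be codegen'ed.

#include <stdio.h>

#define MAX_DIAGS 8

/* One record per function: deferred messages plus an "emitted" flag. */
typedef struct {
    const char *name;
    const char *diags[MAX_DIAGS];
    int ndiags;
    int emitted;
} fn_record;

/* Emit immediately if the function is already known-emitted,
 * otherwise park the message (the K_Deferred case above). */
static void diag_defer(fn_record *fn, const char *msg) {
    if (fn->emitted) {
        fprintf(stderr, "error: %s: %s\n", fn->name, msg);
    } else if (fn->ndiags < MAX_DIAGS) {
        fn->diags[fn->ndiags++] = msg;
    }
}

/* Analogue of marking a function known-emitted: once codegen decides the
 * function is really emitted, flush everything parked on it. */
static void fn_mark_emitted(fn_record *fn) {
    fn->emitted = 1;
    for (int i = 0; i < fn->ndiags; i++)
        fprintf(stderr, "error: %s: %s\n", fn->name, fn->diags[i]);
    fn->ndiags = 0;
}

int main(void) {
    fn_record hd_helper = { "hd_helper", { 0 }, 0, 0 };
    /* A construct that is only an error if hd_helper is codegen'ed for
     * the device (e.g. a VLA in CUDA device code): nothing prints yet. */
    diag_defer(&hd_helper, "variable-length array not supported");
    /* Later, codegen marks the function emitted: the error appears now. */
    fn_mark_emitted(&hd_helper);
    return 0;
}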
api.c
// RUN: %libomptarget-compile-run-and-check-generic
// XFAIL: nvptx64-nvidia-cuda
// Fails on amdgcn with error: GPU Memory Error
// XFAIL: amdgcn-amd-amdhsa

#include <stdio.h>
#include <omp.h>

// ---------------------------------------------------------------------------
// Various definitions copied from OpenMP RTL

extern void __tgt_register_requires(int64_t);

// End of definitions copied from OpenMP RTL.
// ---------------------------------------------------------------------------

#pragma omp requires unified_shared_memory

#define N 1024

void init(int A[], int B[], int C[]) {
  for (int i = 0; i < N; ++i) {
    A[i] = 0;
    B[i] = 1;
    C[i] = i;
  }
}

int main(int argc, char *argv[]) {
  const int device = omp_get_default_device();

  // Manual registration of requires flags for Clang versions
  // that do not support requires.
  __tgt_register_requires(8);

  // CHECK: Initial device: [[INITIAL_DEVICE:[0-9]+]]
  printf("Initial device: %d\n", omp_get_initial_device());

  // CHECK: Num devices: [[INITIAL_DEVICE]]
  printf("Num devices: %d\n", omp_get_num_devices());

  //
  // Target alloc & target memcpy
  //
  int A[N], B[N], C[N];

  // Init
  init(A, B, C);

  int *pA, *pB, *pC;

  // map ptrs
  pA = &A[0];
  pB = &B[0];
  pC = &C[0];

  int *d_A = (int *)omp_target_alloc(N * sizeof(int), device);
  int *d_B = (int *)omp_target_alloc(N * sizeof(int), device);
  int *d_C = (int *)omp_target_alloc(N * sizeof(int), device);

  // CHECK: omp_target_alloc succeeded
  printf("omp_target_alloc %s\n", d_A && d_B && d_C ? "succeeded" : "failed");

  omp_target_memcpy(d_B, pB, N * sizeof(int), 0, 0, device,
                    omp_get_initial_device());
  omp_target_memcpy(d_C, pC, N * sizeof(int), 0, 0, device,
                    omp_get_initial_device());

#pragma omp target is_device_ptr(d_A, d_B, d_C) device(device)
  {
#pragma omp parallel for schedule(static, 1)
    for (int i = 0; i < N; i++) {
      d_A[i] = d_B[i] + d_C[i] + 1;
    }
  }

  omp_target_memcpy(pA, d_A, N * sizeof(int), 0, 0, omp_get_initial_device(),
                    device);

  // CHECK: Test omp_target_memcpy: Succeeded
  int fail = 0;
  for (int i = 0; i < N; ++i) {
    if (A[i] != i + 2)
      fail++;
  }
  if (fail) {
    printf("Test omp_target_memcpy: Failed\n");
  } else {
    printf("Test omp_target_memcpy: Succeeded\n");
  }

  //
  // target_is_present and target_associate/disassociate_ptr
  //
  init(A, B, C);

  // CHECK: B is not present, associating it...
  // CHECK: omp_target_associate_ptr B succeeded
  if (!omp_target_is_present(B, device)) {
    printf("B is not present, associating it...\n");
    int rc = omp_target_associate_ptr(B, d_B, N * sizeof(int), 0, device);
    printf("omp_target_associate_ptr B %s\n", !rc ? "succeeded" : "failed");
  }

  // CHECK: C is not present, associating it...
  // CHECK: omp_target_associate_ptr C succeeded
  if (!omp_target_is_present(C, device)) {
    printf("C is not present, associating it...\n");
    int rc = omp_target_associate_ptr(C, d_C, N * sizeof(int), 0, device);
    printf("omp_target_associate_ptr C %s\n", !rc ? "succeeded" : "failed");
  }

  // CHECK: Inside target data: A is not present
  // CHECK: Inside target data: B is present
  // CHECK: Inside target data: C is present
#pragma omp target data map(from : B, C) device(device)
  {
    printf("Inside target data: A is%s present\n",
           omp_target_is_present(A, device) ? "" : " not");
    printf("Inside target data: B is%s present\n",
           omp_target_is_present(B, device) ? "" : " not");
    printf("Inside target data: C is%s present\n",
           omp_target_is_present(C, device) ? "" : " not");

#pragma omp target map(from : A) device(device)
    {
#pragma omp parallel for schedule(static, 1)
      for (int i = 0; i < N; i++)
        A[i] = B[i] + C[i] + 1;
    }
  }

  // CHECK: B is present, disassociating it...
  // CHECK: omp_target_disassociate_ptr B succeeded
  // CHECK: C is present, disassociating it...
  // CHECK: omp_target_disassociate_ptr C succeeded
  if (omp_target_is_present(B, device)) {
    printf("B is present, disassociating it...\n");
    int rc = omp_target_disassociate_ptr(B, device);
    printf("omp_target_disassociate_ptr B %s\n", !rc ? "succeeded" : "failed");
  }
  if (omp_target_is_present(C, device)) {
    printf("C is present, disassociating it...\n");
    int rc = omp_target_disassociate_ptr(C, device);
    printf("omp_target_disassociate_ptr C %s\n", !rc ? "succeeded" : "failed");
  }

  // CHECK: Test omp_target_associate_ptr: Succeeded
  fail = 0;
  for (int i = 0; i < N; ++i) {
    if (A[i] != i + 2)
      fail++;
  }
  if (fail) {
    printf("Test omp_target_associate_ptr: Failed\n");
  } else {
    printf("Test omp_target_associate_ptr: Succeeded\n");
  }

  omp_target_free(d_A, device);
  omp_target_free(d_B, device);
  omp_target_free(d_C, device);

  printf("Done!\n");

  return 0;
}
plm.c
/* * plmc * Copyright (c) 2016, John Ingraham * john.ingraham@gmail.com */ #include <stdlib.h> #include <ctype.h> #include <math.h> #include <stdio.h> #include <stdint.h> #include <sys/time.h> #include <assert.h> #include <string.h> /* Optionally include OpenMP with the -fopenmp flag */ #if defined(_OPENMP) #include <omp.h> #endif #include "include/twister.h" #include "include/plm.h" #include "include/inference.h" /* Usage pattern */ const char *usage = "plmc\n" "\n" "Usage:\n" " plm [options] alignmentfile\n" " plm -c couplingsfile alignmentfile\n" " plm -o paramfile -c couplingsfile alignmentfile\n" " plm [-h | --help]\n" " \n" " Required input:\n" " alignmentfile Multiple sequence alignment in FASTA format\n" "\n" " Options, output:\n" " -c --couplings couplingsfile Save coupling scores to file (text)\n" " -o --output paramfile Save estimated parameters to file (binary)\n" "\n" " Options, alignment processing:\n" " -s --scale <value> Sequence weights: neighborhood weight [s > 0]\n" " -t --theta <value> Sequence weights: neighborhood divergence [0 < t < 1]\n" "\n" " Options, Maximum a posteriori estimation (L-BFGS, default):\n" " -lh --lambdah <value> Set L2 lambda for fields (h_i)\n" " -le --lambdae <value> Set L2 lambda for couplings (e_ij)\n" " -lg --lambdag <value> Set group L1 lambda for couplings (e_ij)\n" "\n" " Options, general:\n" " --fast Fast weights and stochastic gradient descent\n" " -a --alphabet alphabet Alternative character set to use for analysis\n" " -f --focus identifier Select only uppercase, non-gapped sites from a focus sequence\n" " -g --gapignore Model sequence likelihoods only by coding, non-gapped portions\n" " -m --maxiter Maximum number of iterations\n" " -n --ncores [<number>|max] Maximum number of threads to use in OpenMP\n" " -h --help Usage\n\n"; /* Internal functions to MSARead */ void MSAReadSeq(char *seq, FILE *fpAli); letter_t MSAReadCode(char c, char *alphabet, int nCodes); /* Global verbosity & profiling options */ int verbose = 2; /* Reference amino acid indexing */ const char *codesAA = "-ACDEFGHIKLMNPQRSTVWY"; /* Default parameters */ const numeric_t REGULARIZATION_LAMBDA_H = 0.01; const numeric_t REGULARIZATION_LAMBDA_E = 100.0; const numeric_t REGULARIZATION_LAMBDA_GROUP = 0.0; const numeric_t REWEIGHTING_THETA = 0.20; const numeric_t REWEIGHTING_SCALE = 1.0; const int ZERO_APC_PRIORS = 0; const int SGD_BATCH_SIZE = 2048; const int REWEIGHTING_SAMPLES = 5000; int main(int argc, char **argv) { char *alignFile = NULL; char *outputFile = NULL; char *couplingsFile = NULL; /* Default options */ options_t *options = (options_t *) malloc(sizeof(options_t)); options->lambdaH = REGULARIZATION_LAMBDA_H; options->lambdaE = REGULARIZATION_LAMBDA_E; options->lambdaGroup = REGULARIZATION_LAMBDA_GROUP; options->scale = REWEIGHTING_SCALE; options->zeroAPC = 0; options->maxIter = 0; options->usePairs = 1; options->estimator = INFER_MAP; options->estimatorMAP = INFER_MAP_PLM; options->target = NULL; options->alphabet = (char *) codesAA; /* Sequence weight options */ options->fastWeights = 0; options->theta = REWEIGHTING_THETA; /* SGD options */ options->sgd = 0; options->sgdBatchSize = SGD_BATCH_SIZE; /* Initialize PRNG */ init_genrand(42); /* Print usage if no arguments */ if (argc == 1) { fprintf(stderr, "%s", usage); exit(1); } /* Parse command line arguments */ for (int arg = 1; arg < argc; arg++) { if ((arg < argc-1) && (strcmp(argv[arg], "--output") == 0 || strcmp(argv[arg], "-o") == 0)) { outputFile = argv[++arg]; } else if ((arg < argc-1) && 
(strcmp(argv[arg], "--alphabet") == 0 || strcmp(argv[arg], "-a") == 0)) { options->alphabet = argv[++arg]; } else if ((arg < argc-1) && (strcmp(argv[arg], "--couplings") == 0 || strcmp(argv[arg], "-c") == 0)) { couplingsFile = argv[++arg]; } else if ((arg < argc-1) && (strcmp(argv[arg], "--lambdah") == 0 || strcmp(argv[arg], "-lh") == 0)) { options->lambdaH = atof(argv[++arg]); } else if ((arg < argc-1) && (strcmp(argv[arg], "--lambdae") == 0 || strcmp(argv[arg], "-le") == 0)) { options->lambdaE = atof(argv[++arg]); } else if ((arg < argc-1) && (strcmp(argv[arg], "--lambdag") == 0 || strcmp(argv[arg], "-lg") == 0)) { options->lambdaGroup = atof(argv[++arg]); } else if ((arg < argc-1) && (strcmp(argv[arg], "--theta") == 0 || strcmp(argv[arg], "-t") == 0)) { options->theta = atof(argv[++arg]); } else if ((arg < argc-1) && (strcmp(argv[arg], "--scale") == 0 || strcmp(argv[arg], "-s") == 0)) { options->scale = atof(argv[++arg]); } else if ((arg < argc-1) && (strcmp(argv[arg], "--maxiter") == 0 || strcmp(argv[arg], "-m") == 0)) { options->maxIter = atoi(argv[++arg]); } else if ((arg < argc-1) && (strcmp(argv[arg], "--independent") == 0 || strcmp(argv[arg], "-i") == 0)) { options->usePairs = 0; fprintf(stderr, "Independent model not yet implemented\n"); exit(0); } else if ((arg < argc-1) && (strcmp(argv[arg], "--gapreduce") == 0 || strcmp(argv[arg], "-g") == 0)) { options->estimatorMAP = INFER_MAP_PLM_GAPREDUCE; } else if ((arg < argc-1) && (strcmp(argv[arg], "--estimatele") == 0 || strcmp(argv[arg], "-ee") == 0)) { options->zeroAPC = 1; } else if ((arg < argc-1) && (strcmp(argv[arg], "--focus") == 0 || strcmp(argv[arg], "-f") == 0)) { options->target = argv[++arg]; } else if ((arg < argc-1) && strcmp(argv[arg], "--fast") == 0) { options->sgd = 1; options->fastWeights = 100; } else if ((arg < argc-1) && (strcmp(argv[arg], "--ncores") == 0 || strcmp(argv[arg], "-n") == 0)) { #if defined(_OPENMP) if (strcmp(argv[arg + 1], "max") == 0) { int maxThreads = omp_get_max_threads(); /* Redundant, but serves as sanity check */ omp_set_num_threads(maxThreads); fprintf(stderr, "OpenMP: Using %d of %d threads\n", maxThreads, maxThreads); } else { int numThreads = atoi(argv[arg + 1]); int maxThreads = omp_get_max_threads(); if (numThreads >= 1 && numThreads <= maxThreads) { omp_set_num_threads(numThreads); fprintf(stderr, "OpenMP: Using %d of %d threads\n", numThreads, maxThreads); } else if (numThreads > maxThreads) { omp_set_num_threads(maxThreads); fprintf(stderr, "OpenMP: More threads requested than " "available. 
Using %d of %d threads instead.\n", maxThreads, maxThreads); } else { omp_set_num_threads(1); fprintf(stderr, "OpenMP: Using 1 of %d threads\n", maxThreads); } } arg++; #else fprintf(stderr, "Error (-n/--ncores) only available when " "compiled with OpenMP\n"); exit(1); #endif } else if (strcmp(argv[arg], "--help") == 0 || strcmp(argv[arg], "-h") == 0) { fprintf(stderr, "%s", usage); exit(1); } } alignFile = argv[argc - 1]; /* Read multiple seqence alignment */ alignment_t *ali = MSARead(alignFile, options); /* Reweight sequences by inverse neighborhood density */ MSAReweightSequences(ali, options); /* Compute sitwise and pairwise marginal distributions */ MSACountMarginals(ali, options); /* Infer model parameters */ numeric_t *x = InferPairModel(ali, options); /* (Optionally) Output estimated parameters and coupling scores */ if (outputFile != NULL) OutputParametersFull(outputFile, x, ali, options); if (couplingsFile != NULL) OutputCouplingScores(couplingsFile, x, ali, options); /* Free alignment and options */ MSAFree(ali, options); } alignment_t *MSARead(char *alignFile, options_t *options) { /* Read FASTA-formatted alignment */ FILE *fpAli = NULL; if (alignFile != NULL) { fpAli = fopen(alignFile, "r"); } else { fprintf(stderr, "Must specify alignment file: -a ALIGN_FILE\n"); exit(1); } if (fpAli == NULL) { fprintf(stderr, "Error opening alignment file\n"); exit(1); } /* Allocate alignment */ alignment_t *ali = (alignment_t *) malloc(sizeof(alignment_t)); ali->nSeqs = ali->nSites = ali->nCodes = 0; ali->alphabet = options->alphabet; ali->names = NULL; ali->sequences = NULL; ali->target = -1; ali->offsets = NULL; ali->nEff = 0; ali->weights = ali->fi = ali->fij = NULL; ali->nParams = 0; /* Verify alignment dimensions and structure (first pass through file) */ char name[BUFFER_SIZE]; char seq[BUFFER_SIZE]; /* Read first line as name */ fgetstr(name, fpAli); if (*name == '>') { MSAReadSeq(seq, fpAli); } else { fprintf(stderr, "Error reading alignment:" " First line should start with >\n"); exit(1); } ali->nCodes = strlen(ali->alphabet); ali->nSites = strlen(seq); ali->nSeqs = 1; while (!feof(fpAli)) { char c = fgetc(fpAli); if (c == '>') { /* Read name and sequence pair */ fgetstr(name, fpAli); MSAReadSeq(seq, fpAli); } else { fprintf(stderr, "Error reading alignment:" " sequence records should start with >\n"); exit(1); } /* Validate sequence length */ if (strlen(seq) != ali->nSites) { fprintf(stderr, "Incompatible sequence length (%lu should be %d) for %s:\n%s\n", strlen(seq), ali->nSites, name, seq); exit(1); } ali->nSeqs++; } /* Encode full alignment block (second pass through file) */ ali->sequences = (letter_t *) malloc(ali->nSites * ali->nSeqs * sizeof(letter_t)); ali->names = (char **) malloc(ali->nSeqs * sizeof(char *)); for (int s = 0; s < ali->nSeqs; s++) for (int i = 0; i < ali->nSites; i++) seq(s, i) = 0; for (int s = 0; s < ali->nSeqs; s++) ali->names[s] = NULL; rewind(fpAli); for (int s = 0; s < ali->nSeqs; s++) { /* >Name */ getc(fpAli); fgetstr(name, fpAli); ali->names[s] = (char *) malloc((strlen(name) + 1) * sizeof(char)); strcpy(ali->names[s], name); /* Sequence */ MSAReadSeq(seq, fpAli); for (int i = 0; i < ali->nSites; i++) seq(s, i) = MSAReadCode(seq[i], ali->alphabet, ali->nCodes); } /* --------------------------------_DEBUG_--------------------------------*/ /* Alignment to stderr */ // for (int s = 0; s < 10; s++) { // for (int s = 0; s < ali->nSeqs; s++) { // for (int i = 0; i < ali->nSites; i++) // if (seq(s, i) >= 0 && seq(s, i) < ali->nCodes) { // 
fprintf(stderr, "%c", ali->alphabet[seq(s, i)]); // } else if (seq(s, i) >= -ali->nCodes && seq(s, i) < 0) { // fprintf(stderr, "%c", // tolower(ali->alphabet[seq(s, i) + ali->nCodes])); // } else { // fprintf(stderr, "*%d*", seq(s, i)); // } // fprintf(stderr, "\n"); // } // exit(0); /* --------------------------------^DEBUG^--------------------------------*/ /* Focus mode: If a focus sequence (target) is provided, locate it */ if (options->target != NULL) { for (int s = 0; s < ali->nSeqs; s++) if (strncmp(options->target, ali->names[s], strlen(options->target)) == 0) { if (ali->target >= 0) { fprintf(stderr, "Multiple sequences start with %s, picking sequence %d\n", options->target, s + 1); } else { ali->target = s; } } if (ali->target >= 0) { fprintf(stderr, "Found focus %s as sequence %d\n", options->target, ali->target + 1); } else { fprintf(stderr, "Could not find %s, proceeding without focus sequence\n", options->target); } } /* Always discard any sequences (rows) with out-of-alphabet characters */ int* seqValid = (int *) malloc(ali->nSeqs * sizeof(int)); for (int s = 0; s < ali->nSeqs; s++) seqValid[s] = 0; for (int s = 0; s < ali->nSeqs; s++) for (int i = 0; i < ali->nSites; i++) if ((seq(s, i) >= -ali->nCodes) && (seq(s, i) < ali->nCodes)) seqValid[s]++; int nValidSeqs = 0; for (int s = 0; s < ali->nSeqs; s++) if (seqValid[s] == ali->nSites) nValidSeqs++; fprintf(stderr, "%d valid sequences out of %d \n", nValidSeqs, ali->nSeqs); /* Recored indices of skipped sequences */ ali->nSkippedSeqs = ali->nSeqs - nValidSeqs; ali->skippedSeqs = (int *) malloc(ali->nSkippedSeqs * sizeof(int)); for (int s = 0, skipIndex = 0; s < ali->nSeqs; s++) if (seqValid[s] != ali->nSites) ali->skippedSeqs[skipIndex++] = s; /* Focus mode: select only focus columns (criteria below) */ int nValidSites = ali->nSites; int* siteValid = (int *) malloc(ali->nSites * sizeof(int)); for (int i = 0; i < ali->nSites; i++) siteValid[i] = 1; if (ali->target >= 0) { for (int i = 0; i < ali->nSites; i++) { /* For proteins, remove lower case and gap columns */ if ((ali->alphabet == codesAA) && (seq(ali->target, i) < 0)) siteValid[i] = 0; /* Discard gaps */ if ((ali->alphabet == codesAA) || (options->estimatorMAP == INFER_MAP_PLM_GAPREDUCE)) if (seq(ali->target, i) == 0) siteValid[i] = 0; } nValidSites = 0; for (int i = 0; i < ali->nSites; i++) if (siteValid[i] == 1) nValidSites++; fprintf(stderr, "%d sites out of %d\n", nValidSites, ali->nSites); } else { fprintf(stderr, "%d sites\n", ali->nSites); } /* Focus mode: parse region (NAME/START_IX-END_IX) and map offsets */ int leftOffset = 0; if (ali->target >= 0) { char *focusName = ali->names[ali->target]; /* Name should be immediately followed by '/' */ if (strlen(focusName) > strlen(options->target) + 1 && focusName[strlen(options->target)] == '/') { /* Attempt to read integer region start */ int regLeft = strlen(options->target) + 1; int ix = 0; if (isdigit(focusName[regLeft])) { while (regLeft + ix < strlen(focusName) && isdigit(focusName[regLeft + ix + 1])) ix++; int tens = 1; leftOffset = -1; for (int i = ix; i >= 0; i--) { leftOffset += tens * (focusName[regLeft + i] - '0'); tens *= 10; } fprintf(stderr, "Region starts at %d\n", leftOffset + 1); } else { fprintf(stderr, "Error parsing region, assuming start at 1"); } } /* Map the offsets */ ali->offsets = (int *) malloc(nValidSites * sizeof(int)); for (int i = 0; i < nValidSites; i++) ali->offsets[i] = i + 1; int ix = 0; for (int i = 0; i < ali->nSites; i++) if (siteValid[i] == 1) { ali->offsets[ix] = i + 1 + 
leftOffset; ix++; } /* Reposition the target for reduced alignment */ int targetShift = -1; for (int i = 0; i <= ali->target; i++) if (seqValid[i] == ali->nSites) targetShift++; ali->target = targetShift; } /* Copy only selected rows and columns */ if (nValidSeqs < ali->nSeqs || nValidSites < ali->nSites) { letter_t *seqsReduced = (letter_t *) malloc(nValidSites * nValidSeqs * sizeof(letter_t)); for (int i = 0; i < nValidSites * nValidSeqs; i++) seqsReduced[i] = 0; int sx = 0; for (int s = 0; s < ali->nSeqs; s++) if (seqValid[s] == ali->nSites) { int ix = 0; for (int i = 0; i < ali->nSites; i++) { if (siteValid[i] == 1) { seqsReduced[ix + sx * nValidSites] = seq(s, i); ix++; } } sx++; } /* Reallocate alignment with reduced dimensions */ free(ali->sequences); ali->nSeqs = nValidSeqs; ali->nSites = nValidSites; ali->sequences = (letter_t *) malloc(nValidSites * nValidSeqs * sizeof(letter_t)); for (int i = 0; i < nValidSites * nValidSeqs; i++) ali->sequences[i] = 0; for (int s = 0; s < nValidSeqs; s++) for (int i = 0; i < nValidSites; i++) seq(s, i) = seqsReduced[i + s * nValidSites]; free(seqsReduced); } /* Shift any lowercase codes back to uppercase */ for (int s = 0; s < ali->nSeqs; s++) for (int i = 0; i < ali->nSites; i++) if (seq(s, i) < 0) seq(s, i) += ali->nCodes; /* Initialize weights to 1.0 */ ali->weights = (numeric_t *) malloc(ali->nSeqs * sizeof(numeric_t)); for (int s = 0; s < ali->nSeqs; s++) ali->weights[s] = 1.0; ali->nEff = (numeric_t) ali->nSeqs; /* --------------------------------_DEBUG_--------------------------------*/ /* Display offset map */ // for (int i = 0; i < ali->nSites; i++) { // fprintf(stderr, "%d : %d : %c\n", i + 1, ali->offsets[i], // ali->alphabet[seq(ali->target, i)]); // } // exit(0); /* Display target */ // for (int i = 0; i < ali->nSites; i++) { // fprintf(stderr, "%c", ali->alphabet[seq(ali->target, i)]); // } // fprintf(stderr, "\n"); // exit(0); /* --------------------------------^DEBUG^--------------------------------*/ /* --------------------------------_DEBUG_--------------------------------*/ // for (int s = 0; s < ali->nSeqs; s++) { // fprintf(stderr, ">%s\n", ali->names[s]); // for (int i = 0; i < ali->nSites; i++) // fprintf(stderr, "%c", ali->alphabet[seq(s, i)]); // fprintf(stderr, "\n"); // } /* --------------------------------^DEBUG^--------------------------------*/ return ali; } void MSAReadSeq(char *seq, FILE *fpAli) { /* Read sequence from the current line(s) */ char buf[BUFFER_SIZE]; /* Look ahead one character */ char c = fgetc(fpAli); ungetc(c, fpAli); seq[0] = '\0'; while (c != '>' && !feof(fpAli)) { fgetstr(buf, fpAli); strcat(seq, buf); /* Look ahead one character */ c = fgetc(fpAli); ungetc(c, fpAli); } } letter_t MSAReadCode(char c, char *alphabet, int nCodes) { /* Encode a character as an integer between -nCodes and +nCodes In alphabet: store index [0, nCodes - 1] Lowercase version of alphabet: downshift by nCodes [-nCodes, -1] Out of alphabet: store nCodes [nCodes] */ letter_t i = 0; /* Protein-specific treatment of '.' */ if (alphabet == codesAA) if (c == '.') c = '-'; /* Store lowercase characters as down-shifted by nCodes */ while ((i < nCodes - 1) && toupper(c) != alphabet[i]) i++; if (c != alphabet[i] && toupper(c) == alphabet[i]) i -= nCodes; /* Encode out-of-alphabet characters by [nCodes] */ if (i > 0 && toupper(c) != alphabet[i]) i = nCodes; return i; } void MSAReweightSequences(alignment_t *ali, options_t *options) { /* Reweight sequences by their inverse neighborhood size. 
Each sequence's weight is the inverse of the number of neighboring sequences with less than THETA percent divergence */ for (int i = 0; i < ali->nSeqs; i++) ali->weights[i] = 1.0; /* Only apply reweighting if theta is on [0,1] */ if (options->theta >= 0 && options->theta <= 1) { /* The neighborhood size of each sequence is the number of sequences in the alignment within theta percent divergence */ if (options->fastWeights > 0 && options->fastWeights < ali->nSeqs) { /* Cluster the sequences with k-consensus */ int nClusters = options->fastWeights; int nIterations = 10; int nSeqs = ali->nSeqs; int nCodes = ali->nCodes; int nSites = ali->nSites; #define COUNTS(i,j,a) counts[i * nSites * nCodes + j * nCodes + a] #define CONSENSUS(i,j) consensus[i * nSites + j] #define ALI(i,j) aliPermute[i * nSites + j] /* Pick initial clusters with Reservoir sampling */ int *clusters = (int *) malloc(nClusters * sizeof(int)); letter_t *consensus = (letter_t *) malloc(nClusters * nSites * sizeof(letter_t)); for (int i = 0; i < nClusters; i++) clusters[i] = i; for (int i = nClusters; i < nSeqs; i++) { int ix = genrand_int32() % (i); if (ix < nClusters) clusters[ix] = i; } for (int i = 0; i < nClusters; i++) for (int j = 0; j < nSites; j++) CONSENSUS(i,j) = seq(clusters[i], j); free(clusters); /* EM steps */ int *assignment = (int *) malloc(nSeqs * sizeof(int)); int *counts = (int *) malloc(nClusters * nSites * nCodes * sizeof(int)); int *radii = (int *) malloc(nClusters * sizeof(int)); for (int i = 0; i < nSeqs; i++) assignment[i] = 0; fprintf(stderr, "Clustering"); for (int t = 0; t < nIterations; t++) { fprintf(stderr, "."); /* Step 1. Update the assignments */ for (int i = 0; i < nClusters; i++) radii[i] = 0; #pragma omp parallel for for (int s = 0; s < nSeqs; s++) { int ixOld = assignment[s]; /* Current distance to current assignment */ numeric_t distance = 0; for (int j = 0; j < nSites; j++) distance += (CONSENSUS(ixOld, j) != seq(s, j)); /* Find closest */ int ixNew = ixOld; for (int i = 0; i < nClusters; i++) { numeric_t distanceI = 0; for (int j = 0; j < nSites; j++) distanceI += (CONSENSUS(i, j) != seq(s, j)); if (distanceI < distance) { ixNew = i; distance = distanceI; } } if (ixNew != ixOld) assignment[s] = ixNew; if (radii[ixNew] < distance) radii[ixNew] = distance; } /* --------------------------_DEBUG_--------------------------*/ // for (int s = 0; s < nClusters; s++) { // int size = 0; // for (int i = 0; i < nSeqs; i++) size += (assignment[i] == s); // fprintf(stderr, ">Cluster %d, %d members, radius %d\n", s, size, radii[s]); // for (int i = 0; i < ali->nSites; i++) // if (CONSENSUS(s,i) >= 0) { // fprintf(stderr, "%c", ali->alphabet[CONSENSUS(s,i)]); // } else { // fprintf(stderr, " "); // } // fprintf(stderr, "\n"); // } /* --------------------------^DEBUG^--------------------------*/ /* Step 2. 
Update the consensus sequences */ /* Update the counts */ if (t < nIterations - 1) { for (int i = 0; i < nClusters * nSites * nCodes; i++) counts[i] = 0; for (int s = 0; s < nSeqs; s++) for (int j = 0; j < nSites; j++) COUNTS(assignment[s], j, seq(s, j)) += 1; #pragma omp parallel for for (int i = 0; i < nClusters; i++) for (int j = 0; j < nSites; j++) { int topCode = 0; int topCounts = COUNTS(i, j, 0); for (int b = 1; b < nCodes; b++) if (COUNTS(i, j, b) > topCounts) { topCode = b; topCounts = COUNTS(i, j, b); } CONSENSUS(i ,j) = topCode; } } } fprintf(stderr, "\n"); /* Profile-profile distances */ numeric_t *clusterID = (numeric_t *) malloc(nClusters * nClusters * sizeof(numeric_t)); for (int i = 0; i < nClusters * nClusters; i++) clusterID[i] = 0; #pragma omp parallel for for (int pi = 0; pi < nClusters; pi++) for (int pj = 0; pj < nClusters; pj++) for (int j = 0; j < nSites; j++) clusterID[pi + pj * nClusters] += (CONSENSUS(pi,j) == CONSENSUS(pj,j)); free(consensus); free(counts); /* Permute alignment */ int *clusterSizes = (int *) malloc(nClusters * sizeof(int)); int *clusterStart = (int *) malloc(nClusters * sizeof(int)); int *clusterEnd = (int *) malloc(nClusters * sizeof(int)); int *permuteMap = (int *) malloc(nSeqs * sizeof(int)); numeric_t *weightsP = (numeric_t *) malloc(nSeqs * sizeof(numeric_t)); letter_t *aliPermute = (letter_t *) malloc(nSeqs * nSites * sizeof(letter_t)); for (int i = 0; i < nClusters; i++) clusterSizes[i] = 0; for (int s = 0; s < ali->nSeqs; s++) clusterSizes[assignment[s]] += 1; int ix = 0; for (int i = 0; i < nClusters; i++) { clusterStart[i] = ix; ix += clusterSizes[i]; clusterEnd[i] = ix; } ix = 0; for (int i = 0; i < nClusters; i++) for (int s = 0; s < ali->nSeqs; s++) if (assignment[s] == i) { for (int j = 0; j < nSites; j++) ALI(ix,j) = seq(s,j); permuteMap[ix] = s; ix++; } /* ----------------------------_DEBUG_----------------------------*/ // for (int s = 0; s < nSeqs; s++) { // fprintf(stdout, ">Seq %d\n", s); // for (int i = 0; i < ali->nSites; i++) // fprintf(stdout, "%c", ali->alphabet[ALI(s,i)]); // fprintf(stdout, "\n"); // } /* ----------------------------^DEBUG^----------------------------*/ /* Sequence weights */ numeric_t cutoff = (numeric_t) ((1 - options->theta) * ali->nSites); for (int s = 0; s < nSeqs; s++) weightsP[s] = 1; #pragma omp parallel for for (int ci = 0; ci < nClusters; ci++) for (int cj = 0; cj < nClusters; cj++) if (clusterID[ci * nClusters + cj] >= 0.9 * cutoff) for (int s = clusterStart[ci]; s < clusterEnd[ci]; s++) for (int t = clusterStart[cj]; t < clusterEnd[cj]; t++) if (s != t) { int id = 0; for (int n = 0; n < ali->nSites; n++) id += (ALI(s, n) == ALI(t, n)); if (id >= cutoff) weightsP[s] += 1.0; } for (int s = 0; s < nSeqs; s++) ali->weights[permuteMap[s]] = weightsP[s]; #undef COUNTS #undef CONSENSUS #undef ALI free(clusterSizes); free(clusterStart); free(clusterEnd); free(permuteMap); free(weightsP); free(radii); free(aliPermute); } else { /* Deterministic sequence weights */ #if defined(_OPENMP) /* Naive parallelization is faster ignoring symmetry */ #pragma omp parallel for for (int s = 0; s < ali->nSeqs; s++) for (int t = 0; t < ali->nSeqs; t++) if (s != t) { int id = 0; for (int n = 0; n < ali->nSites; n++) id += (seq(s, n) == seq(t, n)); if (id >= ((1 - options->theta) * ali->nSites)) ali->weights[s] += 1.0; } #else /* For a single core, take advantage of symmetry */ for (int s = 0; s < ali->nSeqs - 1; s++) for (int t = s + 1; t < ali->nSeqs; t++) { int id = 0; for (int n = 0; n < ali->nSites; n++) id += 
(seq(s, n) == seq(t, n)); if (id >= ((1 - options->theta) * ali->nSites)) { ali->weights[s] += 1.0; ali->weights[t] += 1.0; } } #endif } /* Reweight sequences by the inverse of the neighborhood size */ for (int i = 0; i < ali->nSeqs; i++) ali->weights[i] = 1.0 / ali->weights[i]; } /* Scale sets the effective number of samples per neighborhood */ for (int i = 0; i < ali->nSeqs; i++) ali->weights[i] *= options->scale; /* The effective number of sequences is then the sum of the weights */ ali->nEff = 0; for (int i = 0; i < ali->nSeqs; i++) ali->nEff += ali->weights[i]; if (options->theta >= 0 && options->theta <= 1) { fprintf(stderr, "Effective number of samples: %.1f\t(%.0f%% identical neighborhood = %.3f samples)\n", ali->nEff, 100 * (1 - options->theta), options->scale); } else { fprintf(stderr, "Theta not between 0 and 1, no sequence reweighting applied (N = %.2f)\n", ali->nEff); } } void MSACountMarginals(alignment_t *ali, options_t *options) { /* Compute first and second order marginal distributions, according to the sequence weights */ if (options->estimatorMAP == INFER_MAP_PLM_GAPREDUCE) { /* Condition the marginals on ungapped */ ali->nCodes = strlen(ali->alphabet) - 1; /* First-order marginals P_i(Ai) */ int nFi = ali->nSites * ali->nCodes; ali->fi = (numeric_t *) malloc(nFi * sizeof(numeric_t)); for (int i = 0; i < nFi; i++) ali->fi[i] = 0.0; for (int s = 0; s < ali->nSeqs; s++) for (int i = 0; i < ali->nSites; i++) if (seq(s, i) > 0) fi(i, seq(s, i) - 1) += ali->weights[s]; /* Second-order marginals P_ij(Ai, Aj) */ int nFij = ali->nSites * (ali->nSites - 1) / 2 * ali->nCodes * ali->nCodes; ali->fij = (numeric_t *) malloc(nFij * sizeof(numeric_t)); for (int i = 0; i < nFij; i++) ali->fij[i] = 0.0; for (int s = 0; s < ali->nSeqs; s++) for (int i = 0; i < ali->nSites - 1; i++) for (int j = i + 1; j < ali->nSites; j++) if (seq(s, i) > 0) if(seq(s, j) > 0) fij(i, j, seq(s, i) - 1, seq(s, j) - 1) += ali->weights[s]; /* Normalize conditional distributions */ for (int i = 0; i < ali->nSites; i++) { double fsum = 0.0; for (int ai = 0; ai < ali->nCodes; ai++) fsum += fi(i, ai); if (fsum != 0) { double fsumInv = 1.0 / fsum; for (int ai = 0; ai < ali->nCodes; ai++) fi(i, ai) *= fsumInv; } else { /* Handle empty columns */ numeric_t flatF = 1.0 / ((numeric_t) ali->nCodes); for (int ai = 0; ai < ali->nCodes; ai++) fi(i, ai) = flatF; } } for (int i = 0; i < ali->nSites - 1; i++) for (int j = i + 1; j < ali->nSites; j++) { double fsum = 0.0; for (int ai = 0; ai < ali->nCodes; ai++) for (int aj = 0; aj < ali->nCodes; aj++) fsum += fij(i, j, ai, aj); if (fsum != 0) { double fsumInv = 1.0 / fsum; for (int ai = 0; ai < ali->nCodes; ai++) for (int aj = 0; aj < ali->nCodes; aj++) fij(i, j, ai, aj) *= fsumInv; } else { /* Handle pairs of empty columns */ numeric_t flatF = 1.0 / ((numeric_t) (ali->nCodes * ali->nCodes)); for (int ai = 0; ai < ali->nCodes; ai++) for (int aj = 0; aj < ali->nCodes; aj++) fij(i, j, ai, aj) = flatF; } } } else { /* Compute regular marginals */ numeric_t Zinv = 1.0 / ali->nEff; /* First-order marginals P_i(Ai) */ int nFi = ali->nSites * ali->nCodes; ali->fi = (numeric_t *) malloc(nFi * sizeof(numeric_t)); for (int i = 0; i < nFi; i++) ali->fi[i] = 0.0; for (int s = 0; s < ali->nSeqs; s++) for (int i = 0; i < ali->nSites; i++) fi(i, seq(s, i)) += ali->weights[s] * Zinv; /* Second-order marginals P_ij(Ai, Aj) */ int nFij = ali->nSites * (ali->nSites - 1) / 2 * ali->nCodes * ali->nCodes; ali->fij = (numeric_t *) malloc(nFij * sizeof(numeric_t)); for (int i = 0; i < nFij; i++) 
ali->fij[i] = 0.0; for (int s = 0; s < ali->nSeqs; s++) for (int i = 0; i < ali->nSites - 1; i++) for (int j = i + 1; j < ali->nSites; j++) fij(i, j, seq(s, i), seq(s, j)) += ali->weights[s] * Zinv; } } void MSAFree(alignment_t *ali, options_t *options) { /* Free alignment and options */ if (ali->names && ali->names[0]) for (int i = 0; i < ali->nSeqs; i++) free(ali->names[i]); free(ali->names); free(ali->sequences); free(ali->weights); free(ali->fi); free(ali->fij); /* Note: options->target and options->alphabet are never allocated */ free(options); } #define OUTPUT_PRECISION float void OutputParametersSite(char *outputFile, const numeric_t *x, alignment_t *ali) { FILE *fpOutput = NULL; fpOutput = fopen(outputFile, "w"); if (fpOutput != NULL) { /* 1: nSites */ fwrite(&(ali->nSites), sizeof(ali->nSites), 1, fpOutput); /* 2: (Focus mode only) target sequence */ if (ali->target >= 0) { for (int i = 0; i < ali->nSites; i++) { char c = (char) ali->alphabet[seq(ali->target, i)]; fwrite(&c, sizeof(char), 1, fpOutput); } } else { char c = ali->alphabet[0]; for (int i = 0; i < ali->nSites; i++) fwrite(&c, sizeof(c), 1, fpOutput); } /* 3: (Focus mode only) offset map */ if (ali->target >= 0) { for (int i = 0; i < ali->nSites; i++) { int ix = ali->offsets[i]; fwrite(&ix, sizeof(ix), 1, fpOutput); } } else { for (int i = 0; i < ali->nSites; i++) { int ix = i + 1; fwrite(&ix, sizeof(ix), 1, fpOutput); } } /* 4,5: sitewise marginals fi, twice */ for (int x = 0; x < 2; x++) for (int i = 0; i < ali->nSites; i++) for (int ai = 0; ai < ali->nCodes; ai++) { OUTPUT_PRECISION f = (OUTPUT_PRECISION) fi(i, ai); fwrite(&f, sizeof(f), 1, fpOutput); } /* 6: sitewise parameters hi */ for (int i = 0; i < ali->nSites; i++) for (int ai = 0; ai < ali->nCodes; ai++) { OUTPUT_PRECISION h = (OUTPUT_PRECISION) xHi(i, ai); fwrite(&h, sizeof(h), 1, fpOutput); } fclose(fpOutput); } else { fprintf(stderr, "Error writing parameters\n"); exit(1); } } void OutputParametersFull(char *outputFile, const numeric_t *x, alignment_t *ali, options_t *options) { /* File format */ FILE *fpOutput = NULL; fpOutput = fopen(outputFile, "w"); if (fpOutput != NULL) { /* 1: nSites */ int32_t nSites = (int32_t) ali->nSites; fwrite(&nSites, sizeof(nSites), 1, fpOutput); /* 2: nCodes */ int32_t nCodes = (int32_t) ali->nCodes; fwrite(&nCodes, sizeof(nCodes), 1, fpOutput); /* 3: nSeqs */ int32_t nSeqs = (int32_t) ali->nSeqs; fwrite(&nSeqs, sizeof(nSeqs), 1, fpOutput); /* 4: nSkippedSeqs */ int32_t nSkippedSeqs = (int32_t) ali->nSkippedSeqs; fwrite(&nSkippedSeqs, sizeof(nSkippedSeqs), 1, fpOutput); /* 5: number of iterations */ int32_t maxIter = (int32_t) options->maxIter; fwrite(&maxIter, sizeof(maxIter), 1, fpOutput); /* 6: theta */ OUTPUT_PRECISION theta = (OUTPUT_PRECISION) options->theta; fwrite(&theta, sizeof(theta), 1, fpOutput); /* 7: lambda for fields (lh) */ OUTPUT_PRECISION lh = (OUTPUT_PRECISION) options->lambdaH; fwrite(&lh, sizeof(lh), 1, fpOutput); /* 8: lambda for couplings (le) */ OUTPUT_PRECISION le = (OUTPUT_PRECISION) options->lambdaE; fwrite(&le, sizeof(le), 1, fpOutput); /* 9: group lambda for couplings (lg) */ OUTPUT_PRECISION lg = (OUTPUT_PRECISION) options->lambdaGroup; fwrite(&lg, sizeof(lg), 1, fpOutput); /* 10: effective sample size (nEff) */ OUTPUT_PRECISION nEff = (OUTPUT_PRECISION) ali->nEff; fwrite(&nEff, sizeof(nEff), 1, fpOutput); /* 11: alphabet */ int isGapped = (options->estimatorMAP == INFER_MAP_PLM_GAPREDUCE); for (int i = 0; i < ali->nCodes; i++) { int8_t letter = (int8_t) ali->alphabet[i + isGapped]; 
fwrite(&letter, sizeof(letter), 1, fpOutput); } /* 12: sequence number of neighbors (self included) */ int skipix = 0, reducedix = 0; for (int s = 0; s < ali->nSeqs + ali->nSkippedSeqs; s++) { if (skipix < ali->nSkippedSeqs && s == ali->skippedSeqs[skipix]) { /* Skip skipped sequences */ OUTPUT_PRECISION w = (OUTPUT_PRECISION) 0; fwrite(&w, sizeof(w), 1, fpOutput); skipix++; } else { numeric_t nNeighbors = ali->weights[reducedix]; nNeighbors = 1.0 / (nNeighbors * options->scale); OUTPUT_PRECISION w = (OUTPUT_PRECISION) nNeighbors; fwrite(&w, sizeof(w), 1, fpOutput); reducedix++; } } /* 13: (Focus mode) target sequence */ if (ali->target >= 0) { for (int i = 0; i < ali->nSites; i++) { int8_t c = (int8_t) ali->alphabet[seq(ali->target, i)]; fwrite(&c, sizeof(c), 1, fpOutput); } } else { int8_t c = (int8_t) ali->alphabet[0]; for (int i = 0; i < ali->nSites; i++) fwrite(&c, sizeof(c), 1, fpOutput); } /* 14: (Focus mode) offset map */ if (ali->target >= 0) { for (int i = 0; i < ali->nSites; i++) { int32_t ix = (int32_t) ali->offsets[i]; fwrite(&ix, sizeof(ix), 1, fpOutput); } } else { for (int i = 0; i < ali->nSites; i++) { int32_t ix = (int32_t) i + 1; fwrite(&ix, sizeof(ix), 1, fpOutput); } } /* 15: sitewise marginals fi */ for (int i = 0; i < ali->nSites; i++) for (int ai = 0; ai < ali->nCodes; ai++) { OUTPUT_PRECISION f = (OUTPUT_PRECISION) fi(i, ai); fwrite(&f, sizeof(f), 1, fpOutput); } /* 16: sitewise parameters hi */ for (int i = 0; i < ali->nSites; i++) for (int ai = 0; ai < ali->nCodes; ai++) { OUTPUT_PRECISION h = (OUTPUT_PRECISION) xHi(i, ai); fwrite(&h, sizeof(h), 1, fpOutput); } /* 17: pairwise marginals fij */ for (int i = 0; i < ali->nSites - 1; i++) for (int j = i + 1; j < ali->nSites; j++) for (int ai = 0; ai < ali->nCodes; ai++) for (int aj = 0; aj < ali->nCodes; aj++) { OUTPUT_PRECISION f = (OUTPUT_PRECISION) fij(i, j, ai, aj); fwrite(&f, sizeof(f), 1, fpOutput); } /* 18: couplings eij */ for (int i = 0; i < ali->nSites - 1; i++) for (int j = i + 1; j < ali->nSites; j++) for (int ai = 0; ai < ali->nCodes; ai++) for (int aj = 0; aj < ali->nCodes; aj++) { OUTPUT_PRECISION e = (OUTPUT_PRECISION) xEij(i, j, ai, aj); fwrite(&e, sizeof(e), 1, fpOutput); } fclose(fpOutput); } else { fprintf(stderr, "Error writing parameters\n"); exit(1); } } #undef OUTPUT_PRECISION void OutputCouplingScores(char *couplingsFile, const numeric_t *x, alignment_t *ali, options_t *options) { FILE *fpOutput = NULL; fpOutput = fopen(couplingsFile, "w"); if (fpOutput != NULL) { /* Compute the norm of the coupling parameters between each pair */ numeric_t *couplings = (numeric_t *) malloc((ali->nSites * (ali->nSites - 1) / 2) * sizeof(numeric_t)); for (int i = 0; i < ali->nSites * (ali->nSites - 1) / 2; i++) couplings[i] = 0; for (int i = 0; i < ali->nSites - 1; i++) for (int j = i + 1; j < ali->nSites; j++) { /* Norm(eij) over ai, aj */ numeric_t norm = 0.0; for (int ai = 0; ai < ali->nCodes; ai++) for (int aj = 0; aj < ali->nCodes; aj++) norm += xEij(i, j, ai, aj) * xEij(i, j, ai, aj); norm = sqrt(norm); coupling(i, j) = norm; } numeric_t nPairs = ((numeric_t) ((ali->nSites) * (ali->nSites - 1))) / 2.0; /* Remove first component of the norms (Average Product Correction) */ if (!options->zeroAPC) { /* Determine the site-wise statistics of the norms */ numeric_t C_avg = 0.0; numeric_t *C_pos_avg = (numeric_t *) malloc(ali->nSites * sizeof(numeric_t)); for (int i = 0; i < ali->nSites; i++) { C_pos_avg[i] = 0.0; } for (int i = 0; i < ali->nSites - 1; i++) { for (int j = i + 1; j < ali->nSites; j++) { 
C_pos_avg[i] += coupling(i, j) / (numeric_t) (ali->nSites - 1); C_pos_avg[j] += coupling(i, j) / (numeric_t) (ali->nSites - 1); C_avg += coupling(i, j) / nPairs; } } /* Remove the first component */ for (int i = 0; i < ali->nSites - 1; i++) for (int j = i + 1; j < ali->nSites; j++) coupling(i, j) = coupling(i, j) - C_pos_avg[i] * C_pos_avg[j] / C_avg; } /* Output scores */ if (ali->target >= 0) { /* Focus mode */ for (int i = 0; i < ali->nSites - 1; i++) for (int j = i + 1; j < ali->nSites; j++) { char ai = (char) ali->alphabet[seq(ali->target, i)]; char aj = (char) ali->alphabet[seq(ali->target, j)]; fprintf(fpOutput, "%d %c %d %c 0 %f\n", ali->offsets[i], ai, ali->offsets[j], aj, coupling(i, j)); } } else { for (int i = 0; i < ali->nSites - 1; i++) for (int j = i + 1; j < ali->nSites; j++) fprintf(fpOutput, "%d - %d - 0 %f\n", i + 1, j + 1, coupling(i, j)); } fclose(fpOutput); } else { fprintf(stderr, "Error writing coupling scores\n"); exit(1); } }
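/*
 * ------------------------------------------------------------------------
 * Illustrative sketches (standalone, not plmc code; helper names are
 * hypothetical). First, the reweighting rule implemented by
 * MSAReweightSequences above: each sequence's weight is 1 / (number of
 * sequences, itself included, whose fractional identity to it is at least
 * 1 - theta). seqs is nSeqs x nSites of integer codes, row-major.
 * ------------------------------------------------------------------------
 */
static void reweight_sketch(const int *seqs, int nSeqs, int nSites,
                            double theta, double *weights)
{
    for (int s = 0; s < nSeqs; s++)
        weights[s] = 1.0; /* every sequence neighbors itself */
    for (int s = 0; s < nSeqs - 1; s++)
        for (int t = s + 1; t < nSeqs; t++) {
            int id = 0;
            for (int n = 0; n < nSites; n++)
                id += (seqs[s * nSites + n] == seqs[t * nSites + n]);
            /* neighbors if at least (1 - theta) of the sites are identical */
            if (id >= (1.0 - theta) * nSites) {
                weights[s] += 1.0;
                weights[t] += 1.0;
            }
        }
    for (int s = 0; s < nSeqs; s++)
        weights[s] = 1.0 / weights[s];
}

/*
 * Second, the Average Product Correction applied in OutputCouplingScores:
 * for a symmetric score matrix C with zero diagonal, subtract the rank-one
 * background C_i * C_j / C_avg, where C_i is the off-diagonal average of
 * row i and C_avg is the average over all pairs. This is a dense,
 * standalone restatement of the coupling(i, j) update above.
 */
#include <stdlib.h> /* malloc/free for the sketch */

static void apc_sketch(double *C, int n)
{
    double *rowAvg = (double *) malloc(n * sizeof(double));
    double allAvg = 0.0;
    for (int i = 0; i < n; i++) {
        rowAvg[i] = 0.0;
        for (int j = 0; j < n; j++)
            if (j != i)
                rowAvg[i] += C[i * n + j] / (double) (n - 1);
        allAvg += rowAvg[i] / (double) n;
    }
    if (allAvg != 0.0) /* guard against an all-zero score matrix */
        for (int i = 0; i < n; i++)
            for (int j = 0; j < n; j++)
                if (j != i)
                    C[i * n + j] -= rowAvg[i] * rowAvg[j] / allAvg;
    free(rowAvg);
}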
omp_parallel_sections_lastprivate.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include "omp_testsuite.h" int test_omp_parallel_sections_lastprivate() { int sum; int sum0; int i; int i0; int known_sum; sum =0; sum0 = 0; i0 = -1; #pragma omp parallel sections private(i,sum0) lastprivate(i0) { #pragma omp section { sum0=0; for (i=1;i<400;i++) { sum0=sum0+i; i0=i; } #pragma omp critical { sum= sum+sum0; } } #pragma omp section { sum0=0; for(i=400;i<700;i++) { sum0=sum0+i; i0=i; } #pragma omp critical { sum= sum+sum0; } } #pragma omp section { sum0=0; for(i=700;i<1000;i++) { sum0=sum0+i; i0=i; } #pragma omp critical { sum= sum+sum0; } } } known_sum=(999*1000)/2; return ((known_sum==sum) && (i0==999) ); } int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_omp_parallel_sections_lastprivate()) { num_failed++; } } return num_failed; }
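/*
 * Semantics sketch (separate from the test above): lastprivate on a
 * parallel sections construct copies out the value the variable holds at
 * the end of the *lexically last* section, which is why the test expects
 * i0 == 999 (set by the third section) regardless of thread timing.
 */
int lastprivate_demo(void)
{
  int last = -1;
  #pragma omp parallel sections lastprivate(last)
  {
    #pragma omp section
    { last = 1; }
    #pragma omp section
    { last = 2; } /* lexically last section: this value is copied out */
  }
  printf("last = %d\n", last); /* always prints 2 */
  return last == 2;
}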
timing.h
/*************************************************************************** * include/stxxl/bits/parallel/timing.h * * Provides a simple tool to do performance debugging, also in parallel code. * Extracted from MCSTL - http://algo2.iti.uni-karlsruhe.de/singler/mcstl/ * * Part of the STXXL. See http://stxxl.sourceforge.net * * Copyright (C) 2006 Johannes Singler <singler@ira.uka.de> * Copyright (C) 2014 Timo Bingmann <tb@panthema.net> * * Distributed under the Boost Software License, Version 1.0. * (See accompanying file LICENSE_1_0.txt or copy at * http://www.boost.org/LICENSE_1_0.txt) **************************************************************************/ #ifndef STXXL_PARALLEL_TIMING_HEADER #define STXXL_PARALLEL_TIMING_HEADER #include <cstdio> #include <cstring> #include <cassert> #include <stxxl/bits/config.h> #include <stxxl/bits/parallel/tags.h> #if STXXL_PARALLEL #include <omp.h> #endif STXXL_BEGIN_NAMESPACE namespace parallel { /** Type of point in time, used for the Timing classes. */ typedef double point_in_time; template <typename tag, typename must_be_int = int> class Timing; #if STXXL_PARALLEL /** A class that provides simple run time measurements, also for parallel code. * \param tag If active_tag, then the measurements are actually done. * Otherwise, no code at all is emitted by the compiler. */ template <typename must_be_int> class Timing<active_tag, must_be_int> { private: static const int max_points_in_time = 100; point_in_time points_in_time[max_points_in_time]; point_in_time active, last_start; int pos; char* str; const char* tags[max_points_in_time]; public: Timing() { str = NULL; pos = 0; active = 0.0; last_start = -1.0; } ~Timing() { delete[] str; } /** Take a running time measurement. * \param tag Optional description that will be output again with the timings. * It should describe the operation before the tic(). To time a series of \c n * operations, there should be \c n+1 calls to tic(), and one call to print(). */ inline void tic(const char* tag = NULL) { points_in_time[pos] = omp_get_wtime(); tags[pos] = tag; pos++; } /** Start the running time measurement. * * Should be paired with stop(). */ inline void start() { assert(last_start == -1.0); last_start = omp_get_wtime(); } /** Stop the running time measurement. * * Should be paired with start(). */ inline void stop() { assert(last_start != -1.0); active += (omp_get_wtime() - last_start); last_start = -1.0; } /** Reset running time accumulation. */ inline void reset() { active = 0.0; last_start = -1.0; } /** Accumulate the time between all pairs of start() and stop() so far */ inline point_in_time active_time() { return active; } /** Total time between first and last tic() */ inline point_in_time total_time() { return (points_in_time[pos - 1] - points_in_time[0]) * 1000.0; } private: /** Construct string to print out, presenting the timings. */ const char * c_str() { //avoid stream library here, to avoid cyclic dependencies in header files char tmp[1000]; if (!str) str = new char[pos * 200]; else str[0] = '\0'; sprintf(str, "t %2d T[ms]", omp_get_thread_num()); strcat(str, "\n"); for (int i = 0; i < pos; ) { point_in_time last = points_in_time[i]; i++; if (i == pos) break; if (tags[i] == NULL) sprintf(tmp, "%2d: ", i - 1); else sprintf(tmp, "%20s: ", tags[i]); strcat(str, tmp); sprintf(tmp, "%7.2f ", (points_in_time[i] - last) * 1000.0); strcat(str, tmp); strcat(str, "\n"); } return str; } public: /** Print the running times between the tic()s. 
*/ void print() { printf("print\n"); #pragma omp barrier #pragma omp master printf("\n\n"); #pragma omp critical printf("%s\n", c_str()); } }; #endif // STXXL_PARALLEL /** A class that provides simple run time measurements, also for parallel code. * \param tag If active_tag, then the measurements are actually done, * otherwise, no code at all is emitted by the compiler. */ template <typename must_be_int> class Timing<inactive_tag, must_be_int> { private: static const char* empty_string; public: inline void tic(const char* /*tag*/ = NULL) { } inline void start() { } inline void stop() { } inline void reset() { } inline point_in_time active_time() { return -1.0; } inline point_in_time total_time() { return -1.0; } inline const char * c_str() { return empty_string; } inline void print() { } }; template <typename must_be_int> const char* Timing<inactive_tag, must_be_int>::empty_string = ""; } // namespace parallel STXXL_END_NAMESPACE #endif // !STXXL_PARALLEL_TIMING_HEADER
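/*
 * Usage sketch in plain C (assumes an OpenMP toolchain; mirrors the active
 * Timing class above, which is a thin wrapper around omp_get_wtime()).
 * tic-style measurement is just successive omp_get_wtime() samples, printed
 * with the same millisecond scaling that c_str() uses. The workload is a
 * placeholder.
 */
#include <stdio.h>
#include <omp.h>

static void workload(void)
{
    volatile double x = 0.0; /* hypothetical work to be timed */
    for (int i = 0; i < 1000000; i++)
        x += 1.0;
}

int main(void)
{
    double t0 = omp_get_wtime(); /* like tic() before the operation */
    workload();
    double t1 = omp_get_wtime(); /* like tic() after the operation */
    printf("workload: %7.2f ms\n", (t1 - t0) * 1000.0);
    return 0;
}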
set_value_x_csc.c
#include "alphasparse/kernel.h" #include "alphasparse/opt.h" #include "alphasparse/util.h" #ifdef _OPENMP #include <omp.h> #endif alphasparse_status_t ONAME(ALPHA_SPMAT_CSC *A, const ALPHA_INT row, const ALPHA_INT col, const ALPHA_Number value) { ALPHA_INT num_thread = alpha_get_thread_num(); ALPHA_INT find = 0; #ifdef _OPENMP #pragma omp parallel for num_threads(num_thread) reduction(+:find) #endif for(ALPHA_INT ai = A->cols_start[col]; ai < A->cols_end[col]; ++ai) { const ALPHA_INT ar = A->row_indx[ai]; if(ar == row) { A->values[ai] = value; find ++; ai = A->cols_end[col]; } } if(find) return ALPHA_SPARSE_STATUS_SUCCESS; else return ALPHA_SPARSE_STATUS_INVALID_VALUE; }
pdlange.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/pzlange.c, normal z -> d, Fri Sep 28 17:38:12 2018 * **/ #include "plasma_async.h" #include "plasma_context.h" #include "plasma_descriptor.h" #include "plasma_internal.h" #include "plasma_types.h" #include "plasma_workspace.h" #include <plasma_core_blas.h> #define A(m, n) (double*)plasma_tile_addr(A, m, n) /***************************************************************************//** * Parallel tile calculation of max, one, infinity or Frobenius matrix norm * for a general matrix. ******************************************************************************/ void plasma_pdlange(plasma_enum_t norm, plasma_desc_t A, double *work, double *value, plasma_sequence_t *sequence, plasma_request_t *request) { // Return if failed sequence. if (sequence->status != PlasmaSuccess) return; switch (norm) { double stub; double *workspace; double *scale; double *sumsq; //================ // PlasmaMaxNorm //================ case PlasmaMaxNorm: for (int m = 0; m < A.mt; m++) { int mvam = plasma_tile_mview(A, m); int ldam = plasma_tile_mmain(A, m); for (int n = 0; n < A.nt; n++) { int nvan = plasma_tile_nview(A, n); plasma_core_omp_dlange(PlasmaMaxNorm, mvam, nvan, A(m, n), ldam, &stub, &work[A.mt*n+m], sequence, request); } } #pragma omp taskwait plasma_core_omp_dlange(PlasmaMaxNorm, A.mt, A.nt, work, A.mt, &stub, value, sequence, request); break; //================ // PlasmaOneNorm //================ case PlasmaOneNorm: for (int m = 0; m < A.mt; m++) { int mvam = plasma_tile_mview(A, m); int ldam = plasma_tile_mmain(A, m); for (int n = 0; n < A.nt; n++) { int nvan = plasma_tile_nview(A, n); plasma_core_omp_dlange_aux(PlasmaOneNorm, mvam, nvan, A(m, n), ldam, &work[A.n*m+n*A.nb], sequence, request); } } #pragma omp taskwait workspace = work + A.mt*A.n; plasma_core_omp_dlange(PlasmaInfNorm, A.n, A.mt, work, A.n, workspace, value, sequence, request); break; //================ // PlasmaInfNorm //================ case PlasmaInfNorm: for (int m = 0; m < A.mt; m++) { int mvam = plasma_tile_mview(A, m); int ldam = plasma_tile_mmain(A, m); for (int n = 0; n < A.nt; n++) { int nvan = plasma_tile_nview(A, n); plasma_core_omp_dlange_aux(PlasmaInfNorm, mvam, nvan, A(m, n), ldam, &work[A.m*n+m*A.mb], sequence, request); } } #pragma omp taskwait workspace = work + A.nt*A.m; plasma_core_omp_dlange(PlasmaInfNorm, A.m, A.nt, work, A.m, workspace, value, sequence, request); break; //====================== // PlasmaFrobeniusNorm //====================== case PlasmaFrobeniusNorm: scale = work; sumsq = work + A.mt*A.nt; for (int m = 0; m < A.mt; m++) { int mvam = plasma_tile_mview(A, m); int ldam = plasma_tile_mmain(A, m); for (int n = 0; n < A.nt; n++) { int nvan = plasma_tile_nview(A, n); plasma_core_omp_dgessq(mvam, nvan, A(m, n), ldam, &scale[A.mt*n+m], &sumsq[A.mt*n+m], sequence, request); } } #pragma omp taskwait plasma_core_omp_dgessq_aux(A.mt*A.nt, scale, sumsq, value, sequence, request); break; } }
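/*
 * Sketch of the scaled sum-of-squares reduction behind the Frobenius path
 * above: the dgessq kernels accumulate per-tile (scale, sumsq) pairs. This
 * is the classic LAPACK dlassq recurrence, restated standalone (not PLASMA
 * code). The invariant is scale^2 * sumsq == sum of squares seen so far,
 * which avoids overflow and underflow for extreme entries.
 */
#include <math.h>

static void lassq_update(double x, double *scale, double *sumsq)
{
    double ax = fabs(x);
    if (ax == 0.0)
        return;
    if (*scale < ax) {
        double r = *scale / ax; /* rescale the old sum to the larger scale */
        *sumsq = 1.0 + *sumsq * r * r;
        *scale = ax;
    }
    else {
        double r = ax / *scale;
        *sumsq += r * r;
    }
}

static double frobenius_sketch(const double *a, int n)
{
    double scale = 0.0, sumsq = 1.0; /* LAPACK's conventional start values */
    for (int i = 0; i < n; i++)
        lassq_update(a[i], &scale, &sumsq);
    return scale * sqrt(sumsq); /* e.g. {3, 4} yields 5 */
}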
hash-benchmark.c
#include <assert.h> #include <errno.h> #include <fcntl.h> #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/stat.h> #include <sys/time.h> #include <unistd.h> #include <mbedtls/md.h> #include <mbedtls/md_internal.h> const char testfile[] = "testfile"; unsigned char hashes[32][MBEDTLS_MD_MAX_SIZE]; int current_hash_sum = 0; #define TEST_PREV \ const mbedtls_md_info_t *md_info = \ mbedtls_md_info_from_type(MBEDTLS_MD_SHA256); \ if (md_info == NULL) { \ fprintf(stderr, "error: mbedtls_md_info_from_type failed\n"); \ exit(1); \ } \ \ mbedtls_md_context_t md_ctx; \ mbedtls_md_init(&md_ctx); \ if (mbedtls_md_setup(&md_ctx, md_info, 0 /* don't use HMAC */) != 0) { \ fprintf(stderr, "error: mbedtls_md_setup failed\n"); \ exit(1); \ } \ \ struct timeval start, end; \ gettimeofday(&start, NULL); #define TEST_AFTER(TEST_NAME) \ gettimeofday(&end, NULL); \ print_result(TEST_NAME, md_info, &start, &end); \ mbedtls_md_free(&md_ctx); void print_result(const char *testname, const mbedtls_md_info_t *md_info, struct timeval *start, struct timeval *end) { long seconds = end->tv_sec - start->tv_sec; long micros = 0; if (end->tv_usec >= start->tv_usec) { micros = end->tv_usec - start->tv_usec; } else { seconds--; micros = 1000000 + end->tv_usec - start->tv_usec; } fprintf(stderr, "%-32s took %03lds:%03ldms: ", testname, seconds, micros / 1000); for (int i = 0; i < mbedtls_md_get_size(md_info); ++i) { fprintf(stderr, "%02x", hashes[current_hash_sum][i]); } fprintf(stderr, "\n"); } void hash_whole_file_by_providing_path(void) { TEST_PREV; int rc = mbedtls_md_file(md_info, testfile, hashes[current_hash_sum]); assert(rc == 0); TEST_AFTER("mbedtls_md_file"); } void hash_whole_file_with_sequence_of_updates(const int buffersize) { TEST_PREV; int fd = open(testfile, O_RDONLY); if (fd < 0) { fprintf(stderr, "error: could not open file %s\n", testfile); exit(1); } int rc = mbedtls_md_starts(&md_ctx); assert(rc == 0); for (;;) { unsigned char buf[buffersize]; ssize_t n = read(fd, buf, sizeof buf); if (n > 0) { rc = mbedtls_md_update(&md_ctx, buf, n); assert(rc == 0); } else if (n < 0) { fprintf(stderr, "error while reading file %s\n", testfile); exit(1); } else { break; // EOF } } rc = mbedtls_md_finish(&md_ctx, hashes[current_hash_sum]); assert(rc == 0); close(fd); char msg[128]; snprintf(msg, sizeof msg, "seq of updates %8dkb", buffersize / 1024); TEST_AFTER(msg); } void hash_whole_file_with_2_threads() { TEST_PREV; struct stat st; int rc = stat(testfile, &st); assert(rc == 0); assert(st.st_size % 2 == 0); rc = mbedtls_md_starts(&md_ctx); assert(rc == 0); int num_threads = 0; #pragma omp parallel num_threads(2) { #pragma omp master num_threads = omp_get_num_threads(); #pragma omp sections { #pragma omp section { int fd = open(testfile, O_RDONLY); if (fd < 0) { fprintf(stderr, "error: could not open file %s\n", testfile); exit(1); } int rc = lseek(fd, 0, SEEK_SET); if (rc < 0) { fprintf(stderr, "error: lseek failed: %s\n", strerror(errno)); exit(1); } const int sz = st.st_size / 2; unsigned char buf[1024]; for (int i = 0; i < sz; i += sizeof buf) { ssize_t n = read(fd, buf, sizeof buf); if (n > 0) { #pragma omp critical { rc = mbedtls_md_update(&md_ctx, buf, n); } assert(rc == 0); } else if (n < 0) { fprintf(stderr, "error while reading file %s: %s\n", testfile, strerror(errno)); exit(1); } } close(fd); } #pragma omp section { int fd = open(testfile, O_RDONLY); if (fd < 0) { fprintf(stderr, "error: could not open file %s\n", testfile); exit(1); } const int sz = st.st_size / 2; int 
rc = lseek(fd, sz, SEEK_SET); if (rc < 0) { fprintf(stderr, "error: lseek failed: %s\n", strerror(errno)); exit(1); } unsigned char buf[1024]; for (int i = 0; i < sz; i += sizeof buf) { ssize_t n = read(fd, buf, sizeof buf); if (n > 0) { #pragma omp critical { rc = mbedtls_md_update(&md_ctx, buf, n); } assert(rc == 0); } else if (n < 0) { fprintf(stderr, "error while reading file %s: %s\n", testfile, strerror(errno)); exit(1); } } close(fd); } } } rc = mbedtls_md_finish(&md_ctx, hashes[current_hash_sum]); assert(rc == 0); char msg[128]; snprintf(msg, sizeof msg, "with %d threads", num_threads); TEST_AFTER(msg); } int main() { hash_whole_file_by_providing_path(); current_hash_sum++; hash_whole_file_with_sequence_of_updates(1024); current_hash_sum++; hash_whole_file_with_sequence_of_updates(4 * 1024); current_hash_sum++; hash_whole_file_with_sequence_of_updates(8 * 1024); current_hash_sum++; hash_whole_file_with_sequence_of_updates(32 * 1024); current_hash_sum++; hash_whole_file_with_sequence_of_updates(256 * 1024); current_hash_sum++; hash_whole_file_with_sequence_of_updates(1024 * 1024); current_hash_sum++; //hash_whole_file_with_2_threads(); //current_hash_sum++; return 0; }
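/*
 * Property the buffered-update benchmarks rely on (sketch, using only the
 * mbedtls calls and headers already seen in this file): feeding the same
 * bytes through mbedtls_md_update in different chunk sizes yields an
 * identical digest, so the buffer size only changes I/O granularity,
 * never the result.
 */
static void split_update_demo(void)
{
    const unsigned char msg[] = "abcdefgh";
    unsigned char d1[MBEDTLS_MD_MAX_SIZE], d2[MBEDTLS_MD_MAX_SIZE];
    const mbedtls_md_info_t *info = mbedtls_md_info_from_type(MBEDTLS_MD_SHA256);
    mbedtls_md_context_t ctx;
    int rc;

    assert(info != NULL);
    mbedtls_md_init(&ctx);
    rc = mbedtls_md_setup(&ctx, info, 0 /* no HMAC */);
    assert(rc == 0);

    /* one shot */
    rc = mbedtls_md_starts(&ctx);          assert(rc == 0);
    rc = mbedtls_md_update(&ctx, msg, 8);  assert(rc == 0);
    rc = mbedtls_md_finish(&ctx, d1);      assert(rc == 0);

    /* same message in two chunks; starts() resets the context */
    rc = mbedtls_md_starts(&ctx);              assert(rc == 0);
    rc = mbedtls_md_update(&ctx, msg, 3);      assert(rc == 0);
    rc = mbedtls_md_update(&ctx, msg + 3, 5);  assert(rc == 0);
    rc = mbedtls_md_finish(&ctx, d2);          assert(rc == 0);

    assert(memcmp(d1, d2, mbedtls_md_get_size(info)) == 0);
    mbedtls_md_free(&ctx);
}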
JeeIOrbitalSoA.h
////////////////////////////////////////////////////////////////////////////////////// // This file is distributed under the University of Illinois/NCSA Open Source License. // See LICENSE file in top directory for details. // // Copyright (c) 2016 Jeongnim Kim and QMCPACK developers. // // File developed by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory // // File created by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory ////////////////////////////////////////////////////////////////////////////////////// #ifndef QMCPLUSPLUS_EEIJASTROW_OPTIMIZED_SOA_H #define QMCPLUSPLUS_EEIJASTROW_OPTIMIZED_SOA_H #include "Configuration.h" #if !defined(QMC_BUILD_SANDBOX_ONLY) #include "QMCWaveFunctions/WaveFunctionComponent.h" #endif #include "Particle/DistanceTableData.h" #include <simd/allocator.hpp> #include <simd/algorithm.hpp> #include <map> #include <numeric> namespace qmcplusplus { /** @ingroup WaveFunctionComponent * @brief Specialization for three-body Jastrow function using multiple functors * *Each pair-type can have distinct function \f$u(r_{ij})\f$. *For electrons, distinct pair correlation functions are used *for spins up-up/down-down and up-down/down-up. */ template<class FT> class JeeIOrbitalSoA : public WaveFunctionComponent { ///type of each component U, dU, d2U; using valT = typename FT::real_type; ///element position type using posT = TinyVector<valT, OHMMS_DIM>; ///use the same container using RowContainer = DistanceTableData::RowContainer; ///table index for el-el const int ee_Table_ID_; ///table index for i-el const int ei_Table_ID_; //number of particles int Nelec, Nion; ///number of particles + padded size_t Nelec_padded; //number of groups of the target particleset int eGroups, iGroups; ///reference to the sources (ions) const ParticleSet& Ions; ///diff value RealType DiffVal; ///\f$Uat[i] = sum_(j) u_{i,j}\f$ Vector<valT> Uat, oldUk, newUk; ///\f$dUat[i] = sum_(j) du_{i,j}\f$ using gContainer_type = VectorSoaContainer<valT, OHMMS_DIM>; gContainer_type dUat, olddUk, newdUk; ///\f$d2Uat[i] = sum_(j) d2u_{i,j}\f$ Vector<valT> d2Uat, oldd2Uk, newd2Uk; /// current values during PbyP valT cur_Uat, cur_d2Uat; posT cur_dUat, dUat_temp; ///container for the Jastrow functions Array<FT*, 3> F; std::map<std::string, FT*> J3Unique; //YYYY std::map<FT*, int> J3UniqueIndex; /// the cutoff for e-I pairs std::vector<valT> Ion_cutoff; /// the electrons around ions within the cutoff radius, grouped by species Array<std::vector<int>, 2> elecs_inside; Array<std::vector<valT>, 2> elecs_inside_dist; Array<std::vector<posT>, 2> elecs_inside_displ; /// the ids of ions within the cutoff radius of an electron on which a move is proposed std::vector<int> ions_nearby_old, ions_nearby_new; /// work buffer size size_t Nbuffer; /// compressed distances aligned_vector<valT> Distjk_Compressed, DistkI_Compressed, DistjI_Compressed; std::vector<int> DistIndice_k; /// compressed displacements gContainer_type Disp_jk_Compressed, Disp_jI_Compressed, Disp_kI_Compressed; /// work result buffer VectorSoaContainer<valT, 9> mVGL; // Used for evaluating derivatives with respect to the parameters int NumVars; Array<std::pair<int, int>, 3> VarOffset; Vector<RealType> dLogPsi; Array<PosType, 2> gradLogPsi; Array<RealType, 2> lapLogPsi; // Temporary store for parameter derivatives of functor // The first index is the functor index in J3Unique. The second is the parameter index w.r.t. 
to that // functor std::vector<std::vector<RealType>> du_dalpha; std::vector<std::vector<PosType>> dgrad_dalpha; std::vector<std::vector<Tensor<RealType, 3>>> dhess_dalpha; public: ///alias FuncType using FuncType = FT; JeeIOrbitalSoA(const ParticleSet& ions, ParticleSet& elecs, bool is_master = false) : ee_Table_ID_(elecs.addTable(elecs, DT_SOA)), ei_Table_ID_(elecs.addTable(ions, DT_SOA, true)), Ions(ions), NumVars(0) { ClassName = "JeeIOrbitalSoA"; init(elecs); } ~JeeIOrbitalSoA() {} WaveFunctionComponentPtr makeClone(ParticleSet& elecs) const { JeeIOrbitalSoA<FT>* eeIcopy = new JeeIOrbitalSoA<FT>(Ions, elecs, false); std::map<const FT*, FT*> fcmap; for (int iG = 0; iG < iGroups; iG++) for (int eG1 = 0; eG1 < eGroups; eG1++) for (int eG2 = 0; eG2 < eGroups; eG2++) { if (F(iG, eG1, eG2) == 0) continue; typename std::map<const FT*, FT*>::iterator fit = fcmap.find(F(iG, eG1, eG2)); if (fit == fcmap.end()) { FT* fc = new FT(*F(iG, eG1, eG2)); eeIcopy->addFunc(iG, eG1, eG2, fc); fcmap[F(iG, eG1, eG2)] = fc; } } // Ye: I don't like the following memory allocated by default. eeIcopy->myVars.clear(); eeIcopy->myVars.insertFrom(myVars); eeIcopy->NumVars = NumVars; eeIcopy->dLogPsi.resize(NumVars); eeIcopy->gradLogPsi.resize(NumVars, Nelec); eeIcopy->lapLogPsi.resize(NumVars, Nelec); eeIcopy->VarOffset = VarOffset; eeIcopy->Optimizable = Optimizable; return eeIcopy; } void init(ParticleSet& p) { Nelec = p.getTotalNum(); Nelec_padded = getAlignedSize<valT>(Nelec); Nion = Ions.getTotalNum(); iGroups = Ions.getSpeciesSet().getTotalNum(); eGroups = p.groups(); Uat.resize(Nelec); dUat.resize(Nelec); d2Uat.resize(Nelec); oldUk.resize(Nelec); olddUk.resize(Nelec); oldd2Uk.resize(Nelec); newUk.resize(Nelec); newdUk.resize(Nelec); newd2Uk.resize(Nelec); F.resize(iGroups, eGroups, eGroups); F = nullptr; elecs_inside.resize(eGroups, Nion); elecs_inside_dist.resize(eGroups, Nion); elecs_inside_displ.resize(eGroups, Nion); ions_nearby_old.resize(Nion); ions_nearby_new.resize(Nion); Ion_cutoff.resize(Nion, 0.0); //initialize buffers Nbuffer = Nelec; mVGL.resize(Nbuffer); Distjk_Compressed.resize(Nbuffer); DistjI_Compressed.resize(Nbuffer); DistkI_Compressed.resize(Nbuffer); Disp_jk_Compressed.resize(Nbuffer); Disp_jI_Compressed.resize(Nbuffer); Disp_kI_Compressed.resize(Nbuffer); DistIndice_k.resize(Nbuffer); } void initUnique() { typename std::map<std::string, FT*>::iterator it(J3Unique.begin()), it_end(J3Unique.end()); du_dalpha.resize(J3Unique.size()); dgrad_dalpha.resize(J3Unique.size()); dhess_dalpha.resize(J3Unique.size()); int ifunc = 0; while (it != it_end) { J3UniqueIndex[it->second] = ifunc; FT& functor = *(it->second); int numParams = functor.getNumParameters(); du_dalpha[ifunc].resize(numParams); dgrad_dalpha[ifunc].resize(numParams); dhess_dalpha[ifunc].resize(numParams); ++it; ifunc++; } } void addFunc(int iSpecies, int eSpecies1, int eSpecies2, FT* j) { if (eSpecies1 == eSpecies2) { //if only up-up is specified, assume spin-unpolarized correlations if (eSpecies1 == 0) for (int eG1 = 0; eG1 < eGroups; eG1++) for (int eG2 = 0; eG2 < eGroups; eG2++) { if (F(iSpecies, eG1, eG2) == 0) F(iSpecies, eG1, eG2) = j; } } else { F(iSpecies, eSpecies1, eSpecies2) = j; F(iSpecies, eSpecies2, eSpecies1) = j; } if (j) { RealType rcut = 0.5 * j->cutoff_radius; for (int i = 0; i < Nion; i++) if (Ions.GroupID[i] == iSpecies) Ion_cutoff[i] = rcut; } else { APP_ABORT("JeeIOrbitalSoA::addFunc Jastrow function pointer is NULL"); } std::stringstream aname; aname << iSpecies << "_" << eSpecies1 << "_" << eSpecies2; 
J3Unique[aname.str()] = j; initUnique(); } /** check that correlation information is complete */ void check_complete() { //check that correlation pointers are either all 0 or all assigned bool complete = true; for (int i = 0; i < iGroups; ++i) { int nfilled = 0; bool partial; for (int e1 = 0; e1 < eGroups; ++e1) for (int e2 = 0; e2 < eGroups; ++e2) if (F(i, e1, e2) != 0) nfilled++; partial = nfilled > 0 && nfilled < eGroups * eGroups; if (partial) app_log() << "J3 eeI is missing correlation for ion " << i << std::endl; complete = complete && !partial; } if (!complete) { APP_ABORT("JeeIOrbitalSoA::check_complete J3 eeI is missing correlation components\n see preceding messages " "for details"); } //first set radii for (int i = 0; i < Nion; ++i) { FT* f = F(Ions.GroupID[i], 0, 0); if (f != 0) Ion_cutoff[i] = .5 * f->cutoff_radius; } //then check radii bool all_radii_match = true; for (int i = 0; i < iGroups; ++i) { if (F(i, 0, 0) != 0) { bool radii_match = true; RealType rcut = F(i, 0, 0)->cutoff_radius; for (int e1 = 0; e1 < eGroups; ++e1) for (int e2 = 0; e2 < eGroups; ++e2) radii_match = radii_match && F(i, e1, e2)->cutoff_radius == rcut; if (!radii_match) app_log() << "eeI functors for ion species " << i << " have different radii" << std::endl; all_radii_match = all_radii_match && radii_match; } } if (!all_radii_match) { APP_ABORT("JeeIOrbitalSoA::check_radii J3 eeI are inconsistent for some ion species\n see preceding messages " "for details"); } } //evaluate the distance table with els void resetTargetParticleSet(ParticleSet& P) {} /** check in an optimizable parameter * @param o a super set of optimizable variables */ void checkInVariables(opt_variables_type& active) { myVars.clear(); typename std::map<std::string, FT*>::iterator it(J3Unique.begin()), it_end(J3Unique.end()); while (it != it_end) { (*it).second->checkInVariables(active); (*it).second->checkInVariables(myVars); ++it; } } /** check out optimizable variables */ void checkOutVariables(const opt_variables_type& active) { myVars.clear(); typename std::map<std::string, FT*>::iterator it(J3Unique.begin()), it_end(J3Unique.end()); while (it != it_end) { (*it).second->myVars.getIndex(active); myVars.insertFrom((*it).second->myVars); ++it; } myVars.getIndex(active); NumVars = myVars.size(); if (NumVars) { dLogPsi.resize(NumVars); gradLogPsi.resize(NumVars, Nelec); lapLogPsi.resize(NumVars, Nelec); VarOffset.resize(iGroups, eGroups, eGroups); int varoffset = myVars.Index[0]; for (int ig = 0; ig < iGroups; ig++) for (int jg = 0; jg < eGroups; jg++) for (int kg = 0; kg < eGroups; kg++) { FT* func_ijk = F(ig, jg, kg); if (func_ijk == nullptr) continue; VarOffset(ig, jg, kg).first = func_ijk->myVars.Index.front() - varoffset; VarOffset(ig, jg, kg).second = func_ijk->myVars.Index.size() + VarOffset(ig, jg, kg).first; } } } ///reset the value of all the unique Two-Body Jastrow functions void resetParameters(const opt_variables_type& active) { if (!Optimizable) return; typename std::map<std::string, FT*>::iterator it(J3Unique.begin()), it_end(J3Unique.end()); while (it != it_end) { (*it++).second->resetParameters(active); } for (int i = 0; i < myVars.size(); ++i) { int ii = myVars.Index[i]; if (ii >= 0) myVars[i] = active[ii]; } } /** print the state, e.g., optimizables */ void reportStatus(std::ostream& os) { typename std::map<std::string, FT*>::iterator it(J3Unique.begin()), it_end(J3Unique.end()); while (it != it_end) { (*it).second->myVars.print(os); ++it; } } void build_compact_list(ParticleSet& P) { const DistanceTableData& 
eI_table = P.getDistTable(ei_Table_ID_); for (int iat = 0; iat < Nion; ++iat) for (int jg = 0; jg < eGroups; ++jg) { elecs_inside(jg, iat).clear(); elecs_inside_dist(jg, iat).clear(); elecs_inside_displ(jg, iat).clear(); } for (int jg = 0; jg < eGroups; ++jg) for (int jel = P.first(jg); jel < P.last(jg); jel++) for (int iat = 0; iat < Nion; ++iat) if (eI_table.Distances[jel][iat] < Ion_cutoff[iat]) { elecs_inside(jg, iat).push_back(jel); elecs_inside_dist(jg, iat).push_back(eI_table.Distances[jel][iat]); elecs_inside_displ(jg, iat).push_back(eI_table.Displacements[jel][iat]); } } LogValueType evaluateLog(ParticleSet& P, ParticleSet::ParticleGradient_t& G, ParticleSet::ParticleLaplacian_t& L) { evaluateGL(P, G, L, true); return LogValue; } PsiValueType ratio(ParticleSet& P, int iat) { UpdateMode = ORB_PBYP_RATIO; const DistanceTableData& eI_table = P.getDistTable(ei_Table_ID_); const DistanceTableData& ee_table = P.getDistTable(ee_Table_ID_); cur_Uat = computeU(P, iat, P.GroupID[iat], eI_table.Temp_r.data(), ee_table.Temp_r.data(), ions_nearby_new); DiffVal = Uat[iat] - cur_Uat; return std::exp(static_cast<PsiValueType>(DiffVal)); } void evaluateRatios(VirtualParticleSet& VP, std::vector<ValueType>& ratios) { for (int k = 0; k < ratios.size(); ++k) ratios[k] = std::exp(Uat[VP.refPtcl] - computeU(VP.refPS, VP.refPtcl, VP.refPS.GroupID[VP.refPtcl], VP.getDistTable(ei_Table_ID_).Distances[k], VP.getDistTable(ee_Table_ID_).Distances[k], ions_nearby_old)); } void evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios) { const DistanceTableData& eI_table = P.getDistTable(ei_Table_ID_); const DistanceTableData& ee_table = P.getDistTable(ee_Table_ID_); for (int jg = 0; jg < eGroups; ++jg) { const valT sumU = computeU(P, -1, jg, eI_table.Temp_r.data(), ee_table.Temp_r.data(), ions_nearby_new); for (int j = P.first(jg); j < P.last(jg); ++j) { // remove self-interaction valT Uself(0); for (int iat = 0; iat < Nion; ++iat) { const valT& r_Ij = eI_table.Temp_r[iat]; const valT& r_Ik = eI_table.Distances[j][iat]; if (r_Ij < Ion_cutoff[iat] && r_Ik < Ion_cutoff[iat]) { const int ig = Ions.GroupID[iat]; Uself += F(ig, jg, jg)->evaluate(ee_table.Temp_r[j], r_Ij, r_Ik); } } ratios[j] = std::exp(Uat[j] + Uself - sumU); } } } GradType evalGrad(ParticleSet& P, int iat) { return GradType(dUat[iat]); } PsiValueType ratioGrad(ParticleSet& P, int iat, GradType& grad_iat) { UpdateMode = ORB_PBYP_PARTIAL; const DistanceTableData& eI_table = P.getDistTable(ei_Table_ID_); const DistanceTableData& ee_table = P.getDistTable(ee_Table_ID_); computeU3(P, iat, eI_table.Temp_r.data(), eI_table.Temp_dr, ee_table.Temp_r.data(), ee_table.Temp_dr, cur_Uat, cur_dUat, cur_d2Uat, newUk, newdUk, newd2Uk, ions_nearby_new); DiffVal = Uat[iat] - cur_Uat; grad_iat += cur_dUat; return std::exp(static_cast<PsiValueType>(DiffVal)); } inline void restore(int iat) {} void acceptMove(ParticleSet& P, int iat) { const DistanceTableData& eI_table = P.getDistTable(ei_Table_ID_); const DistanceTableData& ee_table = P.getDistTable(ee_Table_ID_); // get the old value, grad, lapl computeU3(P, iat, eI_table.Distances[iat], eI_table.Displacements[iat], ee_table.Distances[iat], ee_table.Displacements[iat], Uat[iat], dUat_temp, d2Uat[iat], oldUk, olddUk, oldd2Uk, ions_nearby_old); if (UpdateMode == ORB_PBYP_RATIO) { //ratio-only during the move; need to compute derivatives computeU3(P, iat, eI_table.Temp_r.data(), eI_table.Temp_dr, ee_table.Temp_r.data(), ee_table.Temp_dr, cur_Uat, cur_dUat, cur_d2Uat, newUk, newdUk, newd2Uk, 
ions_nearby_new); } #pragma omp simd for (int jel = 0; jel < Nelec; jel++) { Uat[jel] += newUk[jel] - oldUk[jel]; d2Uat[jel] += newd2Uk[jel] - oldd2Uk[jel]; } for (int idim = 0; idim < OHMMS_DIM; ++idim) { valT* restrict save_g = dUat.data(idim); const valT* restrict new_g = newdUk.data(idim); const valT* restrict old_g = olddUk.data(idim); #pragma omp simd aligned(save_g, new_g, old_g) for (int jel = 0; jel < Nelec; jel++) save_g[jel] += new_g[jel] - old_g[jel]; } LogValue += Uat[iat] - cur_Uat; Uat[iat] = cur_Uat; dUat(iat) = cur_dUat; d2Uat[iat] = cur_d2Uat; const int ig = P.GroupID[iat]; // update compact list elecs_inside // if the old position exists in elecs_inside for (int iind = 0; iind < ions_nearby_old.size(); iind++) { int jat = ions_nearby_old[iind]; auto iter = std::find(elecs_inside(ig, jat).begin(), elecs_inside(ig, jat).end(), iat); auto iter_dist = elecs_inside_dist(ig, jat).begin() + std::distance(elecs_inside(ig, jat).begin(), iter); auto iter_displ = elecs_inside_displ(ig, jat).begin() + std::distance(elecs_inside(ig, jat).begin(), iter); if (eI_table.Temp_r[jat] < Ion_cutoff[jat]) // the new position is still inside { *iter_dist = eI_table.Temp_r[jat]; *iter_displ = eI_table.Temp_dr[jat]; *std::find(ions_nearby_new.begin(), ions_nearby_new.end(), jat) = -1; } else { *iter = elecs_inside(ig, jat).back(); elecs_inside(ig, jat).pop_back(); *iter_dist = elecs_inside_dist(ig, jat).back(); elecs_inside_dist(ig, jat).pop_back(); *iter_displ = elecs_inside_displ(ig, jat).back(); elecs_inside_displ(ig, jat).pop_back(); } } // if the old position doesn't exist in elecs_inside but the new position do for (int iind = 0; iind < ions_nearby_new.size(); iind++) { int jat = ions_nearby_new[iind]; if (jat >= 0) { elecs_inside(ig, jat).push_back(iat); elecs_inside_dist(ig, jat).push_back(eI_table.Temp_r[jat]); elecs_inside_displ(ig, jat).push_back(eI_table.Temp_dr[jat]); } } } inline void recompute(ParticleSet& P) { const DistanceTableData& eI_table = P.getDistTable(ei_Table_ID_); const DistanceTableData& ee_table = P.getDistTable(ee_Table_ID_); build_compact_list(P); for (int jel = 0; jel < Nelec; ++jel) { computeU3(P, jel, eI_table.Distances[jel], eI_table.Displacements[jel], ee_table.Distances[jel], ee_table.Displacements[jel], Uat[jel], dUat_temp, d2Uat[jel], newUk, newdUk, newd2Uk, ions_nearby_new, true); dUat(jel) = dUat_temp; // add the contribution from the upper triangle #pragma omp simd for (int kel = 0; kel < jel; kel++) { Uat[kel] += newUk[kel]; d2Uat[kel] += newd2Uk[kel]; } for (int idim = 0; idim < OHMMS_DIM; ++idim) { valT* restrict save_g = dUat.data(idim); const valT* restrict new_g = newdUk.data(idim); #pragma omp simd aligned(save_g, new_g) for (int kel = 0; kel < jel; kel++) save_g[kel] += new_g[kel]; } } } inline valT computeU(const ParticleSet& P, int jel, int jg, const RealType* distjI, const RealType* distjk, std::vector<int>& ions_nearby) { ions_nearby.clear(); for (int iat = 0; iat < Nion; ++iat) if (distjI[iat] < Ion_cutoff[iat]) ions_nearby.push_back(iat); valT Uj = valT(0); for (int kg = 0; kg < eGroups; ++kg) { int kel_counter = 0; for (int iind = 0; iind < ions_nearby.size(); ++iind) { const int iat = ions_nearby[iind]; const int ig = Ions.GroupID[iat]; const valT r_jI = distjI[iat]; for (int kind = 0; kind < elecs_inside(kg, iat).size(); kind++) { const int kel = elecs_inside(kg, iat)[kind]; if (kel != jel) { DistkI_Compressed[kel_counter] = elecs_inside_dist(kg, iat)[kind]; Distjk_Compressed[kel_counter] = distjk[kel]; DistjI_Compressed[kel_counter] = 
r_jI; kel_counter++; if (kel_counter == Nbuffer) { const FT& feeI(*F(ig, jg, kg)); Uj += feeI.evaluateV(kel_counter, Distjk_Compressed.data(), DistjI_Compressed.data(), DistkI_Compressed.data()); kel_counter = 0; } } } if ((iind + 1 == ions_nearby.size() || ig != Ions.GroupID[ions_nearby[iind + 1]]) && kel_counter > 0) { const FT& feeI(*F(ig, jg, kg)); Uj += feeI.evaluateV(kel_counter, Distjk_Compressed.data(), DistjI_Compressed.data(), DistkI_Compressed.data()); kel_counter = 0; } } } return Uj; } inline void computeU3_engine(const ParticleSet& P, const FT& feeI, int kel_counter, valT& Uj, posT& dUj, valT& d2Uj, Vector<valT>& Uk, gContainer_type& dUk, Vector<valT>& d2Uk) { constexpr valT czero(0); constexpr valT cone(1); constexpr valT ctwo(2); constexpr valT lapfac = OHMMS_DIM - cone; valT* restrict val = mVGL.data(0); valT* restrict gradF0 = mVGL.data(1); valT* restrict gradF1 = mVGL.data(2); valT* restrict gradF2 = mVGL.data(3); valT* restrict hessF00 = mVGL.data(4); valT* restrict hessF11 = mVGL.data(5); valT* restrict hessF22 = mVGL.data(6); valT* restrict hessF01 = mVGL.data(7); valT* restrict hessF02 = mVGL.data(8); feeI.evaluateVGL(kel_counter, Distjk_Compressed.data(), DistjI_Compressed.data(), DistkI_Compressed.data(), val, gradF0, gradF1, gradF2, hessF00, hessF11, hessF22, hessF01, hessF02); // compute the contribution to jel, kel Uj = simd::accumulate_n(val, kel_counter, Uj); valT gradF0_sum = simd::accumulate_n(gradF0, kel_counter, czero); valT gradF1_sum = simd::accumulate_n(gradF1, kel_counter, czero); valT hessF00_sum = simd::accumulate_n(hessF00, kel_counter, czero); valT hessF11_sum = simd::accumulate_n(hessF11, kel_counter, czero); d2Uj -= hessF00_sum + hessF11_sum + lapfac * (gradF0_sum + gradF1_sum); std::fill_n(hessF11, kel_counter, czero); for (int idim = 0; idim < OHMMS_DIM; ++idim) { valT* restrict jk = Disp_jk_Compressed.data(idim); valT* restrict jI = Disp_jI_Compressed.data(idim); valT* restrict kI = Disp_kI_Compressed.data(idim); valT dUj_x(0); #pragma omp simd aligned(gradF0, gradF1, gradF2, hessF11, jk, jI, kI) reduction(+ : dUj_x) for (int kel_index = 0; kel_index < kel_counter; kel_index++) { // recycle hessF11 hessF11[kel_index] += kI[kel_index] * jk[kel_index]; dUj_x += gradF1[kel_index] * jI[kel_index]; // destroy jk, kI const valT temp = jk[kel_index] * gradF0[kel_index]; dUj_x += temp; jk[kel_index] *= jI[kel_index]; kI[kel_index] = kI[kel_index] * gradF2[kel_index] - temp; } dUj[idim] += dUj_x; valT* restrict jk0 = Disp_jk_Compressed.data(0); if (idim > 0) { #pragma omp simd aligned(jk, jk0) for (int kel_index = 0; kel_index < kel_counter; kel_index++) jk0[kel_index] += jk[kel_index]; } valT* restrict dUk_x = dUk.data(idim); for (int kel_index = 0; kel_index < kel_counter; kel_index++) dUk_x[DistIndice_k[kel_index]] += kI[kel_index]; } valT sum(0); valT* restrict jk0 = Disp_jk_Compressed.data(0); #pragma omp simd aligned(jk0, hessF01) reduction(+ : sum) for (int kel_index = 0; kel_index < kel_counter; kel_index++) sum += hessF01[kel_index] * jk0[kel_index]; d2Uj -= ctwo * sum; #pragma omp simd aligned(hessF00, hessF22, gradF0, gradF2, hessF02, hessF11) for (int kel_index = 0; kel_index < kel_counter; kel_index++) hessF00[kel_index] = hessF00[kel_index] + hessF22[kel_index] + lapfac * (gradF0[kel_index] + gradF2[kel_index]) - ctwo * hessF02[kel_index] * hessF11[kel_index]; for (int kel_index = 0; kel_index < kel_counter; kel_index++) { const int kel = DistIndice_k[kel_index]; Uk[kel] += val[kel_index]; d2Uk[kel] -= hessF00[kel_index]; } } inline void 
computeU3(const ParticleSet& P, int jel, const RealType* distjI, const RowContainer& displjI, const RealType* distjk, const RowContainer& displjk, valT& Uj, posT& dUj, valT& d2Uj, Vector<valT>& Uk, gContainer_type& dUk, Vector<valT>& d2Uk, std::vector<int>& ions_nearby, bool triangle = false) { constexpr valT czero(0); Uj = czero; dUj = posT(); d2Uj = czero; const int jg = P.GroupID[jel]; const int kelmax = triangle ? jel : Nelec; std::fill_n(Uk.data(), kelmax, czero); std::fill_n(d2Uk.data(), kelmax, czero); for (int idim = 0; idim < OHMMS_DIM; ++idim) std::fill_n(dUk.data(idim), kelmax, czero); ions_nearby.clear(); for (int iat = 0; iat < Nion; ++iat) if (distjI[iat] < Ion_cutoff[iat]) ions_nearby.push_back(iat); for (int kg = 0; kg < eGroups; ++kg) { int kel_counter = 0; for (int iind = 0; iind < ions_nearby.size(); ++iind) { const int iat = ions_nearby[iind]; const int ig = Ions.GroupID[iat]; const valT r_jI = distjI[iat]; const posT disp_Ij = displjI[iat]; for (int kind = 0; kind < elecs_inside(kg, iat).size(); kind++) { const int kel = elecs_inside(kg, iat)[kind]; if (kel < kelmax && kel != jel) { DistkI_Compressed[kel_counter] = elecs_inside_dist(kg, iat)[kind]; DistjI_Compressed[kel_counter] = r_jI; Distjk_Compressed[kel_counter] = distjk[kel]; Disp_kI_Compressed(kel_counter) = elecs_inside_displ(kg, iat)[kind]; Disp_jI_Compressed(kel_counter) = disp_Ij; Disp_jk_Compressed(kel_counter) = displjk[kel]; DistIndice_k[kel_counter] = kel; kel_counter++; if (kel_counter == Nbuffer) { const FT& feeI(*F(ig, jg, kg)); computeU3_engine(P, feeI, kel_counter, Uj, dUj, d2Uj, Uk, dUk, d2Uk); kel_counter = 0; } } } if ((iind + 1 == ions_nearby.size() || ig != Ions.GroupID[ions_nearby[iind + 1]]) && kel_counter > 0) { const FT& feeI(*F(ig, jg, kg)); computeU3_engine(P, feeI, kel_counter, Uj, dUj, d2Uj, Uk, dUk, d2Uk); kel_counter = 0; } } } } inline void registerData(ParticleSet& P, WFBufferType& buf) { if (Bytes_in_WFBuffer == 0) { Bytes_in_WFBuffer = buf.current(); buf.add(Uat.begin(), Uat.end()); buf.add(dUat.data(), dUat.end()); buf.add(d2Uat.begin(), d2Uat.end()); Bytes_in_WFBuffer = buf.current() - Bytes_in_WFBuffer; // free local space Uat.free(); dUat.free(); d2Uat.free(); } else { buf.forward(Bytes_in_WFBuffer); } } inline LogValueType updateBuffer(ParticleSet& P, WFBufferType& buf, bool fromscratch = false) { evaluateGL(P, P.G, P.L, false); buf.forward(Bytes_in_WFBuffer); return LogValue; } inline void copyFromBuffer(ParticleSet& P, WFBufferType& buf) { Uat.attachReference(buf.lendReference<valT>(Nelec), Nelec); dUat.attachReference(Nelec, Nelec_padded, buf.lendReference<valT>(Nelec_padded * OHMMS_DIM)); d2Uat.attachReference(buf.lendReference<valT>(Nelec), Nelec); build_compact_list(P); } void evaluateGL(ParticleSet& P, ParticleSet::ParticleGradient_t& G, ParticleSet::ParticleLaplacian_t& L, bool fromscratch = false) { if (fromscratch) recompute(P); LogValue = valT(0); for (int iat = 0; iat < Nelec; ++iat) { LogValue += Uat[iat]; G[iat] += dUat[iat]; L[iat] += d2Uat[iat]; } LogValue = -LogValue * 0.5; } void evaluateDerivatives(ParticleSet& P, const opt_variables_type& optvars, std::vector<ValueType>& dlogpsi, std::vector<ValueType>& dhpsioverpsi) { bool recalculate(false); std::vector<bool> rcsingles(myVars.size(), false); for (int k = 0; k < myVars.size(); ++k) { int kk = myVars.where(k); if (kk < 0) continue; if (optvars.recompute(kk)) recalculate = true; rcsingles[k] = true; } if (recalculate) { constexpr valT czero(0); constexpr valT cone(1); constexpr valT cminus(-1); constexpr 
valT ctwo(2); constexpr valT lapfac = OHMMS_DIM - cone; const DistanceTableData& ee_table = P.getDistTable(ee_Table_ID_); build_compact_list(P); dLogPsi = czero; gradLogPsi = PosType(); lapLogPsi = czero; for (int iat = 0; iat < Nion; ++iat) { const int ig = Ions.GroupID[iat]; for (int jg = 0; jg < eGroups; ++jg) for (int jind = 0; jind < elecs_inside(jg, iat).size(); jind++) { const int jel = elecs_inside(jg, iat)[jind]; const valT r_Ij = elecs_inside_dist(jg, iat)[jind]; const posT disp_Ij = cminus * elecs_inside_displ(jg, iat)[jind]; const valT r_Ij_inv = cone / r_Ij; for (int kg = 0; kg < eGroups; ++kg) for (int kind = 0; kind < elecs_inside(kg, iat).size(); kind++) { const int kel = elecs_inside(kg, iat)[kind]; if (kel < jel) { const valT r_Ik = elecs_inside_dist(kg, iat)[kind]; const posT disp_Ik = cminus * elecs_inside_displ(kg, iat)[kind]; const valT r_Ik_inv = cone / r_Ik; const valT r_jk = ee_table.Distances[jel][kel]; const posT disp_jk = ee_table.Displacements[jel][kel]; const valT r_jk_inv = cone / r_jk; FT& func = *F(ig, jg, kg); int idx = J3UniqueIndex[F(ig, jg, kg)]; func.evaluateDerivatives(r_jk, r_Ij, r_Ik, du_dalpha[idx], dgrad_dalpha[idx], dhess_dalpha[idx]); int first = VarOffset(ig, jg, kg).first; int last = VarOffset(ig, jg, kg).second; std::vector<RealType>& dlog = du_dalpha[idx]; std::vector<PosType>& dgrad = dgrad_dalpha[idx]; std::vector<Tensor<RealType, 3>>& dhess = dhess_dalpha[idx]; for (int p = first, ip = 0; p < last; p++, ip++) { RealType& dval = dlog[ip]; PosType& dg = dgrad[ip]; Tensor<RealType, 3>& dh = dhess[ip]; dg[0] *= r_jk_inv; dg[1] *= r_Ij_inv; dg[2] *= r_Ik_inv; PosType gr_ee = dg[0] * disp_jk; gradLogPsi(p, jel) -= dg[1] * disp_Ij - gr_ee; lapLogPsi(p, jel) -= (dh(0, 0) + lapfac * dg[0] - ctwo * dh(0, 1) * dot(disp_jk, disp_Ij) * r_jk_inv * r_Ij_inv + dh(1, 1) + lapfac * dg[1]); gradLogPsi(p, kel) -= dg[2] * disp_Ik + gr_ee; lapLogPsi(p, kel) -= (dh(0, 0) + lapfac * dg[0] + ctwo * dh(0, 2) * dot(disp_jk, disp_Ik) * r_jk_inv * r_Ik_inv + dh(2, 2) + lapfac * dg[2]); dLogPsi[p] -= dval; } } } } } for (int k = 0; k < myVars.size(); ++k) { int kk = myVars.where(k); if (kk < 0) continue; dlogpsi[kk] = (ValueType)dLogPsi[k]; RealType sum = 0.0; for (int i = 0; i < Nelec; i++) { #if defined(QMC_COMPLEX) sum -= 0.5 * lapLogPsi(k, i); for (int jdim = 0; jdim < OHMMS_DIM; ++jdim) sum -= P.G[i][jdim].real() * gradLogPsi(k, i)[jdim]; #else sum -= 0.5 * lapLogPsi(k, i) + dot(P.G[i], gradLogPsi(k, i)); #endif } dhpsioverpsi[kk] = (ValueType)sum; } } } }; } // namespace qmcplusplus #endif
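The computeU3_engine kernel above vectorizes its per-electron accumulations with "#pragma omp simd ... reduction(+ : sum)". Below is a minimal, self-contained sketch of that reduction pattern outside QMCPACK; the array names and sizes are illustrative only, and the aligned() clauses of the real kernel are omitted since these buffers make no alignment guarantee.

// omp-simd-reduction sketch (illustrative; not part of QMCPACK)
#include <cstdio>

int main()
{
  constexpr int N = 1024;
  static double grad[N], disp[N];
  for (int i = 0; i < N; i++)
  {
    grad[i] = 1.0 / (i + 1);
    disp[i] = static_cast<double>(i % 7);
  }

  double dU = 0;
  // Each SIMD lane accumulates a private partial sum; OpenMP combines the
  // lanes at loop exit, as in the dUj_x accumulation of computeU3_engine.
#pragma omp simd reduction(+ : dU)
  for (int i = 0; i < N; i++)
    dU += grad[i] * disp[i];

  std::printf("dU = %g\n", dU);
  return 0;
}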
LUT.c
/* LUT.c * Defines the lookup table required by the Urbach-Wilkinson algorithm. */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <limits.h> #include <string.h> #include <omp.h> #include "safeMalloc.h" #include "SIMD.h" #include "LUT.h" #define MAX(X, Y) (((X) > (Y)) ? (X) : (Y)) void allocateLUT(LUT* Ty, chordSet SE){ int r; size_t prePadX = 0, i; /* * The LUT is pre-padded on X. This is needed for dilation, where the * padding might contain valid information. */ if(SE.minX < 0){ prePadX = 0-SE.minX; } Ty->padX = prePadX; Ty->arr = (unsigned char***)safeMalloc((Ty->maxR-Ty->minR+1) * sizeof(char**)); for(r = 0; r < (Ty->maxR - Ty->minR + 1); r++){ Ty->arr[r] = (unsigned char**)safeMalloc(Ty->I * sizeof(char*)); for(i = 0; i < Ty->I ; i++){ Ty->arr[r][i] = (unsigned char*)safeCalloc((Ty->X + prePadX), sizeof(char)); /* Shifting the X index such that negative indices correspond to * the pre-padding. */ Ty->arr[r][i] = &(Ty->arr[r][i][prePadX]); } } /* * Shifting the R index so that it can be accessed from ymin <= r <= ymax * without needing offsets. */ Ty->arr = &(Ty->arr[0 - Ty->minR]); //I'm sorry Valgrind... } void freeLUT(LUT table){ int r; size_t i; // The R index was shifted, create a pointer to the original array unsigned char*** rp = &(table.arr[table.minR]); for(r=table.minR;r<=table.maxR;r++){ for(i=0;i<table.I;i++){ // The X index was also shifted, for padding purposes. free(table.arr[r][i] - table.padX); } free(table.arr[r]); } free(rp); } void circularSwapPointers(LUT Ty){ unsigned char** Ty0; int r; /* * Swap the pointers to r-indices in a circle. This is useful because * Ty(r,i,x) = Ty-1(r+1,i,x) for r < ymax. */ if(Ty.maxR - Ty.minR > 0){ Ty0 = Ty.arr[Ty.minR]; for( r = Ty.minR; r< Ty.maxR; r++){ Ty.arr[r] = Ty.arr[r+1]; } r = Ty.maxR; Ty.arr[r] = Ty0; } } void computeMinRow(image f, LUT Ty, chordSet SE, int r, size_t y){ size_t i, d; /* * Algorithm II.1 in Urbach-Wilkinson paper. * Computes lookup table for a single index of r. */ if(y+r >= 0 && y+r < f.H){ memcpy(Ty.arr[r][0], f.img[y + r], Ty.X); } else { memset(Ty.arr[r][0], 0, Ty.X); } for(i=1;i<SE.Lnum;i++){ d = SE.R[i] - SE.R[i - 1]; simdMin(Ty.arr[r][i], Ty.arr[r][i - 1], Ty.arr[r][i - 1] + d, MAX((int)Ty.X - (int)d, 0)); } } void updateMinLUT(image f, LUT* Ty, chordSet SE, size_t y, size_t tid, size_t num){ /* * When running in an OpenMP context, pointer swapping must be atomic. */ #pragma omp single { for(size_t i = 0; i < num; i++) circularSwapPointers(*Ty); } /* * When running in an OpenMP context, the index to update is shifted by the * thread ID. */ computeMinRow(f, *Ty, SE, Ty->maxR - tid, y); } LUT computeMinLUT(image f, chordSet SE, size_t y, size_t num){ int r; LUT Ty; Ty.I = SE.Lnum; Ty.X = f.W; Ty.minR = SE.minY; Ty.maxR = SE.maxY + num - 1; allocateLUT(&Ty, SE); #pragma omp parallel for for(r=Ty.minR; r<=Ty.maxR; r++){ computeMinRow(f, Ty, SE, r, y); } return Ty; } void computeMaxRow(image f, LUT Ty, chordSet SE, int r, size_t y){ size_t i, d; /* * Algorithm II.1 in Urbach-Wilkinson paper. * Computes lookup table for a single index of r. 
*/ if(y+r >= 0 && y+r < f.H){ memcpy(Ty.arr[r][0], f.img[y + r], Ty.X); } else { memset(Ty.arr[r][0], 0, Ty.X); } for(i=1;i<SE.Lnum;i++){ d = SE.R[i] - SE.R[i - 1]; simdMax(Ty.arr[r][i] - Ty.padX, Ty.arr[r][i - 1] - Ty.padX, Ty.arr[r][i-1] + d - Ty.padX, Ty.X + Ty.padX - d); memcpy(Ty.arr[r][i] + Ty.X - d, Ty.arr[r][i - 1] + Ty.X - d, d); } } void updateMaxLUT(image f, LUT* Ty, chordSet SE, size_t y, size_t tid, size_t num){ /* * When running in an OpenMP context, pointer swapping must be atomic. */ #pragma omp single { for(size_t i = 0; i < num; i++) circularSwapPointers(*Ty); } /* * When running in an OpenMP context, the index to update is shifted by the * thread ID. */ computeMaxRow(f, *Ty, SE, Ty->maxR - tid, y); } LUT computeMaxLUT(image f, chordSet SE, size_t y, size_t num){ int r; LUT Ty; Ty.I = SE.Lnum; Ty.X = f.W; Ty.minR = SE.minY; Ty.maxR = SE.maxY + num - 1; allocateLUT(&Ty, SE); #pragma omp parallel for for(r=Ty.minR; r<=Ty.maxR; r++){ computeMaxRow(f, Ty, SE, r, y); } return Ty; } void printLUT(LUT Ty){ int r; size_t i, x; for(r = Ty.minR; r <= Ty.maxR; r++){ printf("r = %d:\n",r); for(i = 0; i < Ty.I; i++){ printf("\t| %d", Ty.arr[r][i][0]); for(x = 1; x < Ty.X; x++){ printf(" %d", Ty.arr[r][i][x]); } printf(" |\n"); } } }
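computeMinRow and computeMaxRow above chain rows of the lookup table through simdMin and simdMax, which come from SIMD.h and are not shown here. The sketch below states the elementwise-minimum contract simdMin is assumed to satisfy; in the calls above, both source pointers point into the same previous row (b = a + d) while dest is a different row, so no aliasing hazard arises.

/* Assumed contract of simdMin (SIMD.h not shown):
 * dest[x] = min(a[x], b[x]) for 0 <= x < len, on unsigned 8-bit pixels. */
#include <stddef.h>

static void elementwise_min_u8(unsigned char *dest, const unsigned char *a,
                               const unsigned char *b, size_t len)
{
    #pragma omp simd
    for (size_t x = 0; x < len; x++)
        dest[x] = (a[x] < b[x]) ? a[x] : b[x];
}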
radix.h
#pragma once #include <omp.h> #if 1 template<int BITS> struct Keys; template<> struct Keys<32> { private: typedef unsigned int ulong; typedef unsigned int uint; ulong key; public: Keys() {} Keys(const uint x) : key(static_cast<ulong>(x)) {} uint get_uint(const int i) const { return (key >> (32*i)) & static_cast<ulong>(0xFFFFFFFF); } Keys operator<<(const int bits) const { return key << bits; } Keys operator>>(const int bits) const { return key >> bits; } operator uint() const {return get_uint(0);} #if 1 Keys(const uint4 value) : key(static_cast<ulong>(value.x)) {} uint4 get_uint4() const { return (uint4){get_uint(0), 0,0,0}; } #endif }; template<> struct Keys<64> { private: typedef unsigned long long ulong; typedef unsigned int uint; ulong key; public: Keys() {} Keys(const uint x) : key(static_cast<ulong>(x)) {} uint get_uint(const int i) const { return (key >> (32*i)) & static_cast<ulong>(0xFFFFFFFF); } Keys operator<<(const int bits) const { return key << bits; } Keys operator>>(const int bits) const { return key >> bits; } operator uint() const {return get_uint(0);} #if 1 Keys(const uint4 value) : key((static_cast<ulong>(value.x) << 32) | static_cast<ulong>(value.y)) {} uint4 get_uint4() const { return (uint4){get_uint(1), get_uint(0),0,0}; } #endif }; #ifdef __SIZEOF_INT128__ template<> struct Keys<96> { private: typedef unsigned __int128 ulong; typedef unsigned int uint; ulong key; public: Keys() {} Keys(const uint x) : key(static_cast<ulong>(x)) {} uint get_uint(const int i) const { return (key >> (32*i)) & static_cast<ulong>(0xFFFFFFFF); } Keys operator<<(const int bits) const { return key << bits; } Keys operator>>(const int bits) const { return key >> bits; } operator uint() const {return get_uint(0);} #if 1 Keys(const uint4 value) : key((static_cast<ulong>(value.x) << 64) | (static_cast<ulong>(value.y) << 32) | static_cast<ulong>(value.z)) {} uint4 get_uint4() const { return (uint4){get_uint(2), get_uint(1), get_uint(0),0}; } #endif }; #endif template<int BITS> struct RadixSort { typedef Keys<BITS> key_t; private: enum { PAD = 1, numBits = 8, numBuckets = (1<<numBits), numBucketsPad = numBuckets * PAD }; int count; int blockSize; int gridDim; int numBlocks; key_t *sorted; int *excScanBlockPtr, *countsBlockPtr; public: int get_numBits() const {return numBits;} RadixSort(const int _count) : count(_count) { #pragma omp parallel #pragma omp master gridDim = omp_get_num_threads(); if (1) { blockSize = std::max((count/gridDim/64) & -64, 64); /* sandy bridge */ } else { blockSize = std::max((count/gridDim/4) & -64, 64); /* xeonphi */ } numBlocks = (count + blockSize - 1) / blockSize; const int ntmp = numBlocks * numBucketsPad; posix_memalign((void**)&sorted, 64, count*sizeof(key_t)); posix_memalign((void**)&excScanBlockPtr, 64, ntmp*sizeof(int)); posix_memalign((void**)& countsBlockPtr, 64, ntmp*sizeof(int)); int (*excScanBlock)[numBucketsPad] = (int (*)[numBucketsPad])excScanBlockPtr; int (* countsBlock)[numBucketsPad] = (int (*)[numBucketsPad]) countsBlockPtr; #pragma omp parallel { const int blockIdx = omp_get_thread_num(); for(int block = blockIdx; block < numBlocks; block += gridDim) #pragma simd for (int i = 0; i < numBuckets; i++) countsBlock[block][i] = excScanBlock[block][i] = 0; #pragma omp for for (int i = 0; i < count; i++) sorted[i] = 0; } } ~RadixSort() { free(sorted); free(excScanBlockPtr); free(countsBlockPtr); } private: void countPass( const key_t* keys, const int bit, const int count, int* counts) { // Compute the histogram of radix digits for this block only. 
This // corresponds exactly to the count kernel in the GPU implementation. const int mask = (1 << numBits) - 1; #pragma simd for (int i = 0; i < numBuckets; i++) counts[i] = 0; #if 1 for(int i = 0; i < count; ++i) { const int key = (keys[i] >> bit) & mask; counts[key]++; } #endif } void sortPass( const key_t * keys, key_t * sorted, int bit, const int count, const int* digitOffsets, int* counts) { // Compute the histogram of radix digits for this block only. This // corresponds exactly to the count kernel in the GPU implementation. const int numBuckets = 1 << numBits; const int mask = numBuckets - 1; #if 1 for(int i = 0; i < count; i++) { // Extract the key const int key = (keys[i]>> bit) & mask; const int rel = counts[key]; const int scatter = rel + digitOffsets[key]; sorted[scatter] = keys[i]; counts[key] = 1 + rel; } #endif } public: void sort(key_t *keys) { int countsGlobal[numBuckets] __attribute__((aligned(64))) = {0}; int excScanGlobal[numBuckets] __attribute__((aligned(64))) = {0}; int excScanBlockL[numBuckets] __attribute__((aligned(64))) = {0}; int digitOffsets[numBuckets] __attribute__((aligned(64))) = {0}; int (*excScanBlock)[numBucketsPad] = (int (*)[numBucketsPad])excScanBlockPtr; int (* countsBlock)[numBucketsPad] = (int (*)[numBucketsPad]) countsBlockPtr; #if 0 #define PROFILE #endif #ifdef PROFILE double dt1, dt2, dt3, dt4, dt5; double t0,t1; dt1=dt2=dt3=dt4=dt5=0.0; const double tbeg = rtc(); #endif #pragma omp parallel { const int blockIdx = omp_get_thread_num(); for(int bit = 0; bit < BITS; bit += numBits) { #ifdef PROFILE #pragma omp master t0 = rtc(); #endif /* histogramming each of the block */ for(int block = blockIdx; block < numBlocks; block += gridDim) countPass( keys + block*blockSize, bit, std::min(count - block*blockSize, blockSize), &countsBlock[block][0]); #pragma omp barrier #ifdef PROFILE #pragma omp master { t1 = rtc(); dt1 += t1 - t0; t0 = t1; } #endif /* compute global histogram */ for (int digit = blockIdx; digit < numBuckets; digit += gridDim) { int sum = 0.0; for (int block = 0; block < numBlocks; block++) sum += countsBlock[block][digit]; countsGlobal[digit] = sum; } #pragma omp barrier #ifdef PROFILE #pragma omp master { t1 = rtc(); dt2 += t1 - t0; t0 = t1; } #endif /* exclusive scan on the histogram */ #pragma omp single for(int digit = 1; digit < numBuckets; digit++) excScanGlobal[digit] = excScanGlobal[digit - 1] + countsGlobal[digit - 1]; #pragma omp barrier #ifdef PROFILE #pragma omp master { t1 = rtc(); dt3 += t1 - t0; t0 = t1; } #endif /* computing offsets for each digit */ for (int digit = blockIdx; digit < numBuckets; digit += gridDim) { int dgt = digitOffsets[digit]; for (int block = 0; block < numBlocks; block++) { excScanBlock[block][digit] = dgt + excScanGlobal[digit]; dgt += countsBlock[block][digit]; } digitOffsets[digit] = 0; } #pragma omp barrier #ifdef PROFILE #pragma omp master { t1 = rtc(); dt4 += t1 - t0; t0 = t1; } #endif /* sorting */ for(int block = blockIdx; block < numBlocks; block += gridDim) { int counts[numBuckets] = {0}; const int keyIndex = block * blockSize; sortPass( keys + keyIndex, sorted, bit, std::min(count - keyIndex, blockSize), &excScanBlock[block][0], &counts[0]); } #pragma omp barrier #ifdef PROFILE #pragma omp master { t1 = rtc(); dt5 += t1 - t0; t0 = t1; } #endif #pragma omp single { #pragma simd for (int i = 0; i < numBuckets; i++) countsGlobal[i] = excScanGlobal[i] = 0; std::swap(keys, sorted); } } } #ifdef PROFILE const double tend = rtc(); printf("dt1= %g \n", dt1); printf("dt2= %g \n", dt2); 
printf("dt3= %g \n", dt3); printf("dt4= %g \n", dt4); printf("dt5= %g \n", dt5); printf("dt = %g \n", tend-tbeg); #endif } }; #endif #if 1 struct RadixSort64 { private: enum { PAD = 1, numBits = 8, numBuckets = (1<<numBits), numBucketsPad = numBuckets * PAD }; int count; int blockSize; int gridDim; int numBlocks; unsigned long long *sorted; int *excScanBlockPtr, *countsBlockPtr; public: int get_numBits() const {return numBits;} RadixSort64(const int _count) : count(_count) { #pragma omp parallel #pragma omp master gridDim = omp_get_num_threads(); if (1) { blockSize = std::max((count/gridDim/64) & -64, 64); /* sandy bridge */ } else { blockSize = std::max((count/gridDim/4) & -64, 64); /* xeonphi */ } numBlocks = (count + blockSize - 1) / blockSize; const int ntmp = numBlocks * numBucketsPad; posix_memalign((void**)&sorted, 64, count*sizeof(unsigned long long)); posix_memalign((void**)&excScanBlockPtr, 64, ntmp*sizeof(int)); posix_memalign((void**)& countsBlockPtr, 64, ntmp*sizeof(int)); int (*excScanBlock)[numBucketsPad] = (int (*)[numBucketsPad])excScanBlockPtr; int (* countsBlock)[numBucketsPad] = (int (*)[numBucketsPad]) countsBlockPtr; #pragma omp parallel { const int blockIdx = omp_get_thread_num(); for(int block = blockIdx; block < numBlocks; block += gridDim) #pragma simd for (int i = 0; i < numBuckets; i++) countsBlock[block][i] = excScanBlock[block][i] = 0; #pragma omp for for (int i = 0; i < count; i++) sorted[i] = 0; } } ~RadixSort64() { free(sorted); free(excScanBlockPtr); free(countsBlockPtr); } private: void countPass( const unsigned long long* keys, const int bit, const int count, int* counts) { // Compute the histogram of radix digits for this block only. This // corresponds exactly to the count kernel in the GPU implementation. const unsigned long long mask = (1 << numBits) - 1; #pragma simd for (int i = 0; i < numBuckets; i++) counts[i] = 0; #if 1 for(int i = 0; i < count; ++i) { const int key = mask & (keys[i] >> bit); counts[key]++; } #endif } void sortPass( const unsigned long long* keys, unsigned long long* sorted, int bit, const int count, const int* digitOffsets, int* counts) { // Compute the histogram of radix digits for this block only. This // corresponds exactly to the count kernel in the GPU implementation. 
const int numBuckets = 1<< numBits; const unsigned long long mask = numBuckets - 1; #if 1 for(int i = 0; i < count; i++) { // Extract the key const int key = mask & (keys[i]>> bit); const int rel = counts[key]; const int scatter = rel + digitOffsets[key]; sorted[scatter] = keys[i]; counts[key] = 1 + rel; } #endif } public: void sort(unsigned long long *keys) { int countsGlobal[numBuckets] __attribute__((aligned(64))) = {0}; int excScanGlobal[numBuckets] __attribute__((aligned(64))) = {0}; int excScanBlockL[numBuckets] __attribute__((aligned(64))) = {0}; int digitOffsets[numBuckets] __attribute__((aligned(64))) = {0}; int (*excScanBlock)[numBucketsPad] = (int (*)[numBucketsPad])excScanBlockPtr; int (* countsBlock)[numBucketsPad] = (int (*)[numBucketsPad]) countsBlockPtr; #if 0 #define PROFILE #endif #ifdef PROFILE double dt1, dt2, dt3, dt4, dt5; double t0,t1; dt1=dt2=dt3=dt4=dt5=0.0; const double tbeg = rtc(); #endif #pragma omp parallel { const int blockIdx = omp_get_thread_num(); for(int bit = 0; bit < 64; bit += numBits) { #ifdef PROFILE #pragma omp master t0 = rtc(); #endif /* histogramming each of the block */ for(int block = blockIdx; block < numBlocks; block += gridDim) countPass( keys + block*blockSize, bit, std::min(count - block*blockSize, blockSize), &countsBlock[block][0]); #pragma omp barrier #ifdef PROFILE #pragma omp master { t1 = rtc(); dt1 += t1 - t0; t0 = t1; } #endif /* compute global histogram */ for (int digit = blockIdx; digit < numBuckets; digit += gridDim) { int sum = 0.0; for (int block = 0; block < numBlocks; block++) sum += countsBlock[block][digit]; countsGlobal[digit] = sum; } #pragma omp barrier #ifdef PROFILE #pragma omp master { t1 = rtc(); dt2 += t1 - t0; t0 = t1; } #endif /* exclusive scan on the histogram */ #pragma omp single for(int digit = 1; digit < numBuckets; digit++) excScanGlobal[digit] = excScanGlobal[digit - 1] + countsGlobal[digit - 1]; #pragma omp barrier #ifdef PROFILE #pragma omp master { t1 = rtc(); dt3 += t1 - t0; t0 = t1; } #endif /* computing offsets for each digit */ for (int digit = blockIdx; digit < numBuckets; digit += gridDim) { int dgt = digitOffsets[digit]; for (int block = 0; block < numBlocks; block++) { excScanBlock[block][digit] = dgt + excScanGlobal[digit]; dgt += countsBlock[block][digit]; } digitOffsets[digit] = 0; } #pragma omp barrier #ifdef PROFILE #pragma omp master { t1 = rtc(); dt4 += t1 - t0; t0 = t1; } #endif /* sorting */ for(int block = blockIdx; block < numBlocks; block += gridDim) { int counts[numBuckets] = {0}; const int keyIndex = block * blockSize; sortPass( keys + keyIndex, sorted, bit, std::min(count - keyIndex, blockSize), &excScanBlock[block][0], &counts[0]); } #pragma omp barrier #ifdef PROFILE #pragma omp master { t1 = rtc(); dt5 += t1 - t0; t0 = t1; } #endif #pragma omp single { #pragma simd for (int i = 0; i < numBuckets; i++) countsGlobal[i] = excScanGlobal[i] = 0; std::swap(keys, sorted); } } } #ifdef PROFILE const double tend = rtc(); printf("dt1= %g \n", dt1); printf("dt2= %g \n", dt2); printf("dt3= %g \n", dt3); printf("dt4= %g \n", dt4); printf("dt5= %g \n", dt5); printf("dt = %g \n", tend-tbeg); #endif } }; #endif
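RadixSort and RadixSort64 above distribute the classic least-significant-digit scheme over blocks and threads: per-block histograms (countPass), a global exclusive scan, per-block digit offsets, then a stable scatter (sortPass). The single-threaded sketch below shows one pass of exactly that count / scan / scatter pipeline; the names are illustrative and this is not the class interface.

// radix_pass_demo.cpp -- one 8-bit LSD pass: count, exclusive scan, scatter
#include <cstdio>
#include <utility>
#include <vector>

static void radix_pass(const unsigned *in, unsigned *out, int n, int bit)
{
  const int numBuckets = 256, mask = numBuckets - 1;
  int counts[256] = {0}, offsets[256] = {0};
  for (int i = 0; i < n; i++)           // histogram (cf. countPass)
    counts[(in[i] >> bit) & mask]++;
  for (int d = 1; d < numBuckets; d++)  // exclusive scan
    offsets[d] = offsets[d - 1] + counts[d - 1];
  for (int i = 0; i < n; i++)           // stable scatter (cf. sortPass)
    out[offsets[(in[i] >> bit) & mask]++] = in[i];
}

int main()
{
  std::vector<unsigned> a = {0xC0FFEE, 42, 7, 0xBEEF, 42, 1}, b(a.size());
  for (int bit = 0; bit < 32; bit += 8)  // four passes cover 32-bit keys
  {
    radix_pass(a.data(), b.data(), static_cast<int>(a.size()), bit);
    std::swap(a, b);                     // ping-pong, like keys <-> sorted
  }
  for (unsigned v : a)
    std::printf("%u ", v);
  std::printf("\n");
  return 0;
}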
Fig_7.4_schroProg.c
// sample compile command: "gcc -fopenmp -c Fig_7.4_schroProg.c" to generate *.o object file
#include <stdbool.h>
#include <omp.h>
#include <stdio.h>

// Three functions we use but do not show here:
// 1. Set seed for a pseudo-random sequence
void seedIt(long *val);
// 2. function to flip a coin (randomly return true or false)
bool flip(long *coin);
// 3. wait a short random amount of time
double waitAbit();

int main()
{
   int dead_or_alive;
   omp_set_num_threads(2);

   // "flip a coin" to choose which task is for the dead
   // cat and which for the living cat.
   long coin;
   seedIt(&coin);
   bool HorT = flip(&coin);

   printf("Schrodinger's program says the cat is");

   #pragma omp parallel shared(HorT, dead_or_alive)
   {
      // These tasks are participating in a data race
      #pragma omp task
      {
         double val = waitAbit();
         dead_or_alive = HorT;
      }
      #pragma omp task
      {
         double val = waitAbit();
         dead_or_alive = !HorT;
      }
   }

   if (dead_or_alive)
      printf(" alive. \n");
   else
      printf(" dead. \n");

   return 0;
}
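The two tasks above race on dead_or_alive by design; that is the point of the figure. For contrast, the sketch below (not part of the figure) shows one way to make the outcome well defined: a single observing task writes the flag, and the implicit barrier at the end of the single region guarantees the task has completed before the flag is read. waitAbit() is stubbed so the example stands alone.

#include <omp.h>
#include <stdbool.h>
#include <stdio.h>

static double waitAbit(void) { return 0.0; }  // stub for the unshown helper

int main()
{
   bool HorT = true;  // stand-in for the coin flip
   int dead_or_alive = 0;
   #pragma omp parallel shared(HorT, dead_or_alive)
   #pragma omp single   // one thread creates the single observing task
   {
      #pragma omp task shared(dead_or_alive)
      {
         waitAbit();
         dead_or_alive = HorT;  // the only write: no race
      }
   }  // implicit barrier: the task finishes before anyone reads the flag
   printf("the cat is%s\n", dead_or_alive ? " alive." : " dead.");
   return 0;
}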
GB_unop__exp_fp64_fp64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop_apply__exp_fp64_fp64
// op(A') function: GB_unop_tran__exp_fp64_fp64

// C type:   double
// A type:   double
// cast:     double cij = aij
// unaryop:  cij = exp (aij)

#define GB_ATYPE \
    double

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = exp (x) ;

// casting
#define GB_CAST(z, aij) \
    double z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    double aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = aij ; \
    Cx [pC] = exp (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_EXP || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__exp_fp64_fp64
(
    double *Cx,                     // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = exp (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = exp (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__exp_fp64_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
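Stripped of its macro layer, GB_unop_apply__exp_fp64_fp64 reduces to a static-schedule parallel loop with an optional bitmap mask. The macro-free sketch below shows that shape; it is illustrative only and not the GraphBLAS interface.

#include <math.h>
#include <stddef.h>
#include <stdint.h>

void apply_exp (double *Cx, const double *Ax, const int8_t *Ab,
                int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;   // bitmap case: skip holes
        Cx [p] = exp (Ax [p]) ;
    }
}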
GB_unaryop__ainv_uint8_int32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__ainv_uint8_int32
// op(A') function: GB_tran__ainv_uint8_int32

// C type:   uint8_t
// A type:   int32_t
// cast:     uint8_t cij = (uint8_t) aij
// unaryop:  cij = -aij

#define GB_ATYPE \
    int32_t

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = -x ;

// casting
#define GB_CASTING(z, aij) \
    uint8_t z = (uint8_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT8 || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__ainv_uint8_int32
(
    uint8_t *Cx,            // Cx and Ax may be aliased
    int32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__ainv_uint8_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
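For reference, here is what one iteration of the GB_unop__ainv_uint8_int32 loop does once GB_CAST_OP is expanded, written out longhand. Note the order of operations the macros fix: the int32_t entry is cast to uint8_t first, and the negation then wraps modulo 256.

#include <stdint.h>

static inline void ainv_uint8_int32_one (uint8_t *Cx, const int32_t *Ax,
                                         int64_t p)
{
    int32_t aij = Ax [p] ;          /* GB_GETA    */
    uint8_t z = (uint8_t) aij ;     /* GB_CASTING */
    Cx [p] = (uint8_t) (-z) ;       /* GB_OP: unsigned negation, mod 256 */
}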
H2ERI_build_exchange.c
#include <stdio.h> #include <string.h> #include <stdlib.h> #include <assert.h> #include <omp.h> #include "H2Pack_matvec.h" #include "H2Pack_utils.h" #include "H2ERI_typedef.h" #include "H2ERI_build_exchange.h" #include "H2ERI_utils.h" #include "utils.h" // In H2Pack struct Kmat_workbuf { int row_max_level; // Row subtree max level int row_idx_len; // Length of row_idx int row_n_leaf_node; // Number of leaf nodes in row subtree int M_cut; // Index of the first element > N shell index in plist{N} int col_max_level; // Column subtree max level int col_idx_len; // Length of col_idx int col_n_leaf_node; // Number of leaf nodes in column subtree int P_cut; // Index of the first element > Q shell index in plist{Q} int P_list_len; // Length of P_list int vec_out_idx_size; // Size of vec_out_idx int nvi_nnz_ridx_size; // Size of nvi_nnz_row_idx int nvo_nnz_ridx_size; // Size of nvo_nnz_row_idx int int_buffer_size; // Size of int_buffer int dbl_buffer_size; // Size of dbl_buffer int nvec; // Current matmul output matrix number of columns, == N_nbf * Q_nbf int *MN_bfp_sidx; // Size h2eri->nshell+1, indices of each bra-side shell pair's first basis function pair int *row_idx; // Size h2eri->{nshell * max_shell_nbf^2}, H2 matvec row indices int *row_leaf_nodes; // Size h2pack->n_leaf_node, row subtree leaf nodes int *row_node_flag; // Size h2pack->n_node, mark if a node in h2pack is in row subtree int *row_idx_ascend; // Size h2eri->{nshell * max_shell_nbf^2}, row_idx sorted in ascending int *row_idx_pmt; // Size h2eri->{nshell * max_shell_nbf^2}, original indices of row_idx_ascend int *vec_out_sidx; // Size h2pack->n_node+1, indices of each node's vec_out_idx int *P_list; // Size h2eri->nshell, list of significant P shells int *P_idx; // Size h2eri->nshell, indices of P_list[i] in H2ERI_build_exchange->plist int *col_idx; // Size h2eri->{nshell * max_shell_nbf^2}, H2 matvec column indices int *col_leaf_nodes; // Size h2pack->n_leaf_node, column subtree leaf nodes int *col_node_flag; // Size h2pack->n_node, mark if a node in h2pack is in column subtree int *col_idx_pmt; // Size h2eri->{nshell * max_shell_nbf^2}, original indices of row_idx int *col_idx_ipmt; // Size h2eri->{nshell * max_shell_nbf^2}, inverse function of col_idx_pmt int *nvi_nnz_row_sidx; // Size h2pack->n_node+1, indices of each node's nvi_nnz_row_idx int *nvo_nnz_row_sidx; // Size h2pack->n_node+1, indices of each node's nvo_nnz_row_idx int *nvi_nnz_sidx; // Size h2pack->n_node+1, indices of each node's node_vec_in int *nvo_nnz_sidx; // Size h2pack->n_node+1, indices of each node's node_vec_out int *y0_sidx; // Size h2pack->n_node+1, indices of each node's y0 int *y1_sidx; // Size h2pack->n_node+1, indices of each node's y1 int *tmp_arr; // Size h2pack->max_leaf_points, temporary array int *int_buffer; // == all int* arrays above vec_out_idx, to reduce memory fragments int *vec_out_idx; // Size unknown, mapping from each node's nvo_nnz to out_vec int *nvi_nnz_row_idx; // Size unknown, non-zero row indices of each node's input vectors int *nvo_nnz_row_idx; // Size unknown, non-zero row indices of each node's unrestricted output vector double *vec_in; // Size unknown, partial matmul input vectors double *vec_out; // Size unknown, partial matmul output vectors restricted from each node's output double *nvi_nnz; // Size unknown, non-zero rows of partial matmul input vectors extended to each node double *nvo_nnz; // Size unknown, non-zero rows of partial matmul output vectors double *y0; // Size unknown, partial matmul intermediate 
variables double *y1; // Size unknown, partial matmul intermediate variables double *tmp_K; // Size h2eri->max_shell_nbf, temporary array for K mat accumulation double *dbl_buffer; // == vec_in + vec_out + node_vec_in + node_vec_out + y0 + y1, to reduce memory fragments double timers[5]; // Profiling timers }; typedef struct Kmat_workbuf Kmat_workbuf_s; typedef struct Kmat_workbuf *Kmat_workbuf_p; typedef enum { BUILD_K_AUX_TIMER_IDX = 0, // Auxiliary data structure construction BUILD_K_MM_FWD_TIMER_IDX, // H2 partial matmul forward transformation BUILD_K_MM_MID_TIMER_IDX, // H2 partial matmul intermediate multiplication BUILD_K_MM_BWD_TIMER_IDX, // H2 partial matmul backward transformation BUILD_K_MM_DEN_TIMER_IDX // H2 partial matmul dense multiplication } build_exchange_timer_idx_t; // Initialize each thread's K mat build work buffer void H2ERI_exchange_workbuf_init(H2ERI_p h2eri) { int nshell = h2eri->nshell; int max_shell_nbf = h2eri->max_shell_nbf; int n_thread = h2eri->h2pack->n_thread; int n_node = h2eri->h2pack->n_node; int n_leaf_node = h2eri->h2pack->n_leaf_node; int num_sp_bfp = h2eri->num_sp_bfp; Kmat_workbuf_p *thread_Kmat_workbuf = (Kmat_workbuf_p *) malloc(sizeof(Kmat_workbuf_p) * n_thread); #pragma omp parallel num_threads(n_thread) { int tid = omp_get_thread_num(); Kmat_workbuf_p workbuf = (Kmat_workbuf_p) malloc(sizeof(Kmat_workbuf_s)); memset(workbuf, 0, sizeof(Kmat_workbuf_s)); int int_buffer_size = 0; // MN_bfp_sidx int_buffer_size += nshell + 1; // row_idx, row_leaf_nodes, row_node_flag int_buffer_size += nshell * max_shell_nbf * max_shell_nbf; int_buffer_size += n_leaf_node; int_buffer_size += n_node; // row_idx_ascend, row_idx_pmt int_buffer_size += nshell * max_shell_nbf * max_shell_nbf; int_buffer_size += nshell * max_shell_nbf * max_shell_nbf; // vec_out_sidx, P_list, P_idx int_buffer_size += n_node + 1; int_buffer_size += nshell; int_buffer_size += nshell; // col_idx, col_leaf_nodes, col_node_flag, col_idx_pmt, col_idx_ipmt int_buffer_size += nshell * max_shell_nbf * max_shell_nbf; int_buffer_size += n_leaf_node; int_buffer_size += n_node; int_buffer_size += nshell * max_shell_nbf * max_shell_nbf; int_buffer_size += nshell * max_shell_nbf * max_shell_nbf; // nvi_nnz_row_sidx, nvo_nnz_row_sidx, nvi_nnz_sidx, nvo_nnz_sidx int_buffer_size += n_node + 1; int_buffer_size += n_node + 1; int_buffer_size += n_node + 1; int_buffer_size += n_node + 1; // y0_sidx, y1_sidx, tmp_arr int_buffer_size += n_node + 1; int_buffer_size += n_node + 1; int_buffer_size += num_sp_bfp; // This is a safe upper bound for tmp_arr int *int_buffer = (int *) malloc(sizeof(int) * int_buffer_size); ASSERT_PRINTF(int_buffer != NULL, "Failed to allocate int_buffer of size %d\n", int_buffer_size); memset(int_buffer, 0, sizeof(int) * int_buffer_size); workbuf->int_buffer_size = int_buffer_size; workbuf->int_buffer = int_buffer; workbuf->MN_bfp_sidx = workbuf->int_buffer; workbuf->row_idx = workbuf->MN_bfp_sidx + nshell + 1; workbuf->row_leaf_nodes = workbuf->row_idx + nshell * max_shell_nbf * max_shell_nbf; workbuf->row_node_flag = workbuf->row_leaf_nodes + n_leaf_node; workbuf->row_idx_ascend = workbuf->row_node_flag + n_node; workbuf->row_idx_pmt = workbuf->row_idx_ascend + nshell * max_shell_nbf * max_shell_nbf; workbuf->vec_out_sidx = workbuf->row_idx_pmt + nshell * max_shell_nbf * max_shell_nbf; workbuf->P_list = workbuf->vec_out_sidx + n_node + 1; workbuf->P_idx = workbuf->P_list + nshell; workbuf->col_idx = workbuf->P_idx + nshell; workbuf->col_leaf_nodes = workbuf->col_idx + nshell * 
max_shell_nbf * max_shell_nbf; workbuf->col_node_flag = workbuf->col_leaf_nodes + n_leaf_node; workbuf->col_idx_pmt = workbuf->col_node_flag + n_node; workbuf->col_idx_ipmt = workbuf->col_idx_pmt + nshell * max_shell_nbf * max_shell_nbf; workbuf->nvi_nnz_row_sidx = workbuf->col_idx_ipmt + nshell * max_shell_nbf * max_shell_nbf; workbuf->nvo_nnz_row_sidx = workbuf->nvi_nnz_row_sidx + n_node + 1; workbuf->nvi_nnz_sidx = workbuf->nvo_nnz_row_sidx + n_node + 1; workbuf->nvo_nnz_sidx = workbuf->nvi_nnz_sidx + n_node + 1; workbuf->y0_sidx = workbuf->nvo_nnz_sidx + n_node + 1; workbuf->y1_sidx = workbuf->y0_sidx + n_node + 1; workbuf->tmp_arr = workbuf->y1_sidx + n_node + 1; workbuf->vec_out_idx_size = 0; workbuf->nvi_nnz_ridx_size = 0; workbuf->nvo_nnz_ridx_size = 0; workbuf->vec_out_idx = NULL; workbuf->nvi_nnz_row_idx = NULL; workbuf->nvo_nnz_row_idx = NULL; workbuf->dbl_buffer_size = 0; workbuf->dbl_buffer = NULL; workbuf->vec_in = NULL; workbuf->vec_out = NULL; workbuf->nvi_nnz = NULL; workbuf->nvo_nnz = NULL; workbuf->y0 = NULL; workbuf->y1 = NULL; thread_Kmat_workbuf[tid] = workbuf; } // End of "#pragma omp parallel" h2eri->thread_Kmat_workbuf = (void **) thread_Kmat_workbuf; } // Free each thread's K mat build work buffer void H2ERI_exchange_workbuf_free(H2ERI_p h2eri) { int n_thread = h2eri->h2pack->n_thread; Kmat_workbuf_p *thread_Kmat_workbuf = (Kmat_workbuf_p *) h2eri->thread_Kmat_workbuf; for (int i = 0; i < n_thread; i++) { Kmat_workbuf_p workbuf = thread_Kmat_workbuf[i]; free(workbuf->int_buffer); free(workbuf->vec_out_idx); free(workbuf->nvi_nnz_row_idx); free(workbuf->nvo_nnz_row_idx); free(workbuf->dbl_buffer); free(workbuf); } free(thread_Kmat_workbuf); h2eri->thread_Kmat_workbuf = NULL; } // Find the minimal subtree that covers the given point indices // idx must be in ascending order static void H2ERI_find_minimal_cover_subtree( H2Pack_p h2pack, const int *idx, const int idx_len, int *st_n_leaf_node_, int *st_leaf_nodes, int *st_node_flag, int *st_max_level_ ) { int n_leaf_node = h2pack->n_leaf_node; int n_node = h2pack->n_node; int max_level = h2pack->max_level; int *parent = h2pack->parent; int *leaf_nodes = h2pack->height_nodes; int *mat_cluster = h2pack->mat_cluster; int *level_n_node = h2pack->level_n_node; int *level_nodes = h2pack->level_nodes; // Find all leaf nodes in the minimal cover subtree int st_n_leaf_node = 0; int j_start = 0; for (int i = 0; i < n_leaf_node; i++) { int node = leaf_nodes[i]; int mat_cluster_s = mat_cluster[2 * node]; int mat_cluster_e = mat_cluster[2 * node + 1]; for (int j = j_start; j < idx_len; j++) { if ((mat_cluster_s <= idx[j]) && (idx[j] <= mat_cluster_e)) { st_leaf_nodes[st_n_leaf_node] = node; st_n_leaf_node++; j_start = j; break; } } } *st_n_leaf_node_ = st_n_leaf_node; // Upward pass to mark the whole subtree memset(st_node_flag, 0, sizeof(int) * n_node); for (int i = 0; i < st_n_leaf_node; i++) st_node_flag[st_leaf_nodes[i]] = 1; int st_max_level = 0; for (int i = max_level; i >= 1; i--) { int *level_i_nodes = level_nodes + i * n_leaf_node; int level_i_n_node = level_n_node[i]; for (int j = 0; j < level_i_n_node; j++) { int node = level_i_nodes[j]; if (st_node_flag[node] == 1) { st_node_flag[parent[node]] = 1; if (i > st_max_level) st_max_level = i; } } } *st_max_level_ = st_max_level; } // Find the intersect of two ascending arrays and the corresponding indices in the 1st array static void H2ERI_find_intersect( const int *a, const int a_len, const int *b, const int b_len, int *intersect_len_, int *intersect, int *a_idx ) { int cnt 
= 0, i = 0, j = 0; while ((i < a_len) && (j < b_len)) { if (a[i] == b[j]) { intersect[cnt] = a[i]; a_idx[cnt] = i; i++; j++; cnt++; } else { int ai = a[i]; int bj = b[j]; if (ai < bj) i++; else j++; } } *intersect_len_ = cnt; } // Update a Kmat_workbuf structure with a selected (M_list[], N| static void H2ERI_exchange_workbuf_update_MN_list( H2ERI_p h2eri, Kmat_workbuf_p workbuf, const int N, const int num_M, const int *M_list, const int *MN_pair_idx ) { int nshell = h2eri->nshell; int *shell_bf_sidx = h2eri->shell_bf_sidx; // (M, N| requires M < N, appears in shell pair list as: // (M_list[k], N| for any k < workbuf->M_cut, // (N, M_list[k]| for any k >= workbuf->M_cut. int N_nbf = shell_bf_sidx[N + 1] - shell_bf_sidx[N]; for (int i = 0; i < num_M; i++) { if (M_list[i] <= N) continue; workbuf->M_cut = i; break; } if (M_list[num_M - 1] <= N) workbuf->M_cut = num_M; // Offset for the output vector indexed by M_list * N int *MN_bfp_sidx = workbuf->MN_bfp_sidx; memset(MN_bfp_sidx, 0, sizeof(int) * (nshell + 1)); for (int i = 0; i < num_M; i++) { int M = M_list[i]; int M_nbf = shell_bf_sidx[M + 1] - shell_bf_sidx[M]; MN_bfp_sidx[i + 1] = MN_bfp_sidx[i] + M_nbf * N_nbf; } // Row indices of M_list * N out of shell pair int *sp_bfp_sidx = h2eri->sp_bfp_sidx; int *row_idx = workbuf->row_idx; int row_idx_len = 0; for (int i = 0; i < num_M; i++) { int M = M_list[i]; int pair_idx = MN_pair_idx[i]; int num_bfp = sp_bfp_sidx[pair_idx + 1] - sp_bfp_sidx[pair_idx]; for (int j = 0; j < num_bfp; j++) row_idx[row_idx_len + j] = sp_bfp_sidx[pair_idx] + j; row_idx_len += num_bfp; } workbuf->row_idx_len = row_idx_len; // Find the minimal covering subtree for row nodes int *row_idx_ascend = workbuf->row_idx_ascend; int *row_idx_pmt = workbuf->row_idx_pmt; for (int i = 0; i < row_idx_len; i++) { row_idx_ascend[i] = row_idx[i]; row_idx_pmt[i] = i; } H2P_qsort_int_key_val(row_idx_ascend, row_idx_pmt, 0, row_idx_len - 1); H2ERI_find_minimal_cover_subtree( h2eri->h2pack, row_idx_ascend, row_idx_len, &workbuf->row_n_leaf_node, workbuf->row_leaf_nodes, workbuf->row_node_flag, &workbuf->row_max_level ); // Calculate the sizes and offsets of vec_out and nvo_nnz_row int n_node = h2eri->h2pack->n_node; int row_n_leaf_node = workbuf->row_n_leaf_node; int vec_out_idx_size = 0; int *mat_cluster = h2eri->h2pack->mat_cluster; int *row_leaf_nodes = workbuf->row_leaf_nodes; int *vec_out_sidx = workbuf->vec_out_sidx; int *nvo_nnz_row_sidx = workbuf->nvo_nnz_row_sidx; vec_out_idx_size = 0; memset(vec_out_sidx, 0, sizeof(int) * (n_node + 1)); memset(nvo_nnz_row_sidx, 0, sizeof(int) * (n_node + 1)); for (int i = 0; i < row_n_leaf_node; i++) { int node = row_leaf_nodes[i]; int nnz_node = 0; int mat_cluster_s = mat_cluster[2 * node]; int mat_cluster_e = mat_cluster[2 * node + 1]; for (int j = 0; j < row_idx_len; j++) if ((mat_cluster_s <= row_idx_ascend[j]) && (row_idx_ascend[j] <= mat_cluster_e)) nnz_node++; vec_out_sidx[node + 1] = nnz_node; nvo_nnz_row_sidx[node + 1] = nnz_node; vec_out_idx_size += nnz_node; } for (int i = 1; i <= n_node; i++) { vec_out_sidx[i] += vec_out_sidx[i - 1]; nvo_nnz_row_sidx[i] += nvo_nnz_row_sidx[i - 1]; } // Calculate vec_out_idx and nvo_nnz_row_idx if (vec_out_idx_size > workbuf->vec_out_idx_size) { free(workbuf->vec_out_idx); free(workbuf->nvo_nnz_row_idx); workbuf->vec_out_idx_size = vec_out_idx_size; workbuf->nvo_nnz_ridx_size = vec_out_idx_size; workbuf->vec_out_idx = (int *) malloc(sizeof(int) * vec_out_idx_size); workbuf->nvo_nnz_row_idx = (int *) malloc(sizeof(int) * vec_out_idx_size); 
ASSERT_PRINTF(workbuf->vec_out_idx != NULL, "Failed to allocate vec_out_idx of size %d\n", vec_out_idx_size); ASSERT_PRINTF(workbuf->nvo_nnz_row_idx != NULL, "Failed to allocate nvo_nnz_row_idx of size %d\n", vec_out_idx_size); } int *vec_out_idx = workbuf->vec_out_idx; int *nvo_nnz_row_idx = workbuf->nvo_nnz_row_idx; for (int i = 0; i < row_n_leaf_node; i++) { int node = row_leaf_nodes[i]; int nnz_node = 0; int mat_cluster_s = mat_cluster[2 * node]; int mat_cluster_e = mat_cluster[2 * node + 1]; int *vec_out_idx_node = vec_out_idx + vec_out_sidx[node]; int *nvo_nnz_row_idx_node = nvo_nnz_row_idx + nvo_nnz_row_sidx[node]; for (int j = 0; j < row_idx_len; j++) { if (mat_cluster_s <= row_idx_ascend[j] && row_idx_ascend[j] <= mat_cluster_e) { int idx1 = row_idx_pmt[j]; int idx2 = row_idx_ascend[j] - mat_cluster_s; vec_out_idx_node[nnz_node] = idx1; nvo_nnz_row_idx_node[nnz_node] = idx2; nnz_node++; } } // End of j loop } // End of i loop } // Update a Kmat_workbuf structure with a selected |P_list[], Q) static void H2ERI_exchange_workbuf_update_PQ_list( H2ERI_p h2eri, Kmat_workbuf_p workbuf, const int Q, const int num_P0, const int *P_list0, const int *PQ_pair_idx, const int num_D, const int *D_list ) { int nshell = h2eri->nshell; int *shell_bf_sidx = h2eri->shell_bf_sidx; // Filter out significant P shells int num_P = 0; int *P_list = workbuf->P_list; int *P_idx = workbuf->P_idx; H2ERI_find_intersect( P_list0, num_P0, D_list, num_D, &num_P, P_list, P_idx ); workbuf->P_list_len = num_P; if (num_P == 0) return; // |P, Q) requires P < Q, appears in shell pair list as: // |P_list[k], Q) for any k < workbuf->P_cut, // |Q, P_list[k]) for any k >= workbuf->P_cut. int Q_nbf = shell_bf_sidx[Q + 1] - shell_bf_sidx[Q]; for (int i = 0; i < num_P; i++) { if (P_list[i] <= Q) continue; workbuf->P_cut = i; break; } if (P_list[num_P - 1] <= Q) workbuf->P_cut = num_P; // Column indices of P_list * Q out of shell pair int *sp_bfp_sidx = h2eri->sp_bfp_sidx; int *col_idx = workbuf->col_idx; int col_idx_len = 0; for (int i = 0; i < num_P; i++) { int P = P_list[i]; int pair_idx = PQ_pair_idx[P_idx[i]]; int num_bfp = sp_bfp_sidx[pair_idx + 1] - sp_bfp_sidx[pair_idx]; for (int j = 0; j < num_bfp; j++) col_idx[col_idx_len + j] = sp_bfp_sidx[pair_idx] + j; col_idx_len += num_bfp; } workbuf->col_idx_len = col_idx_len; // Find the minimal covering subtree for column nodes int *col_idx_pmt = workbuf->col_idx_pmt; int *col_idx_ipmt = workbuf->col_idx_ipmt; for (int i = 0; i < col_idx_len; i++) col_idx_pmt[i] = i; H2P_qsort_int_key_val(col_idx, col_idx_pmt, 0, col_idx_len - 1); for (int i = 0; i < col_idx_len; i++) col_idx_ipmt[col_idx_pmt[i]] = i; H2ERI_find_minimal_cover_subtree( h2eri->h2pack, col_idx, col_idx_len, &workbuf->col_n_leaf_node, workbuf->col_leaf_nodes, workbuf->col_node_flag, &workbuf->col_max_level ); // Calculate the sizes and offsets of nvi_nnz_row int n_node = h2eri->h2pack->n_node; int col_n_leaf_node = workbuf->col_n_leaf_node; int nvi_nnz_ridx_size = 0; int *mat_cluster = h2eri->h2pack->mat_cluster; int *col_leaf_nodes = workbuf->col_leaf_nodes; int *nvi_nnz_row_sidx = workbuf->nvi_nnz_row_sidx; memset(nvi_nnz_row_sidx, 0, sizeof(int) * (n_node + 1)); for (int i = 0; i < col_n_leaf_node; i++) { int node = col_leaf_nodes[i]; int nnz_node = 0; int mat_cluster_s = mat_cluster[2 * node]; int mat_cluster_e = mat_cluster[2 * node + 1]; for (int j = 0; j < col_idx_len; j++) if ((mat_cluster_s <= col_idx[j]) && (col_idx[j] <= mat_cluster_e)) nnz_node++; nvi_nnz_row_sidx[node + 1] = nnz_node; 
nvi_nnz_ridx_size += nnz_node; } for (int i = 1; i <= n_node; i++) nvi_nnz_row_sidx[i] += nvi_nnz_row_sidx[i - 1]; // Calculate nvi_nnz_row_idx if (nvi_nnz_ridx_size > workbuf->nvi_nnz_ridx_size) { free(workbuf->nvi_nnz_row_idx); workbuf->nvi_nnz_ridx_size = nvi_nnz_ridx_size; workbuf->nvi_nnz_row_idx = (int *) malloc(sizeof(int) * nvi_nnz_ridx_size); ASSERT_PRINTF(workbuf->nvi_nnz_row_idx != NULL, "Failed to allocate nvi_nnz_row_idx of size %d\n", nvi_nnz_ridx_size); } int *nvi_nnz_row_idx = workbuf->nvi_nnz_row_idx; for (int i = 0; i < col_n_leaf_node; i++) { int node = col_leaf_nodes[i]; int mat_cluster_s = mat_cluster[2 * node]; int mat_cluster_e = mat_cluster[2 * node + 1]; int nnz_node = 0; int *nvi_nnz_row_idx_node = nvi_nnz_row_idx + nvi_nnz_row_sidx[node]; for (int j = 0; j < col_idx_len; j++) { if ((mat_cluster_s <= col_idx[j]) && (col_idx[j] <= mat_cluster_e)) { nvi_nnz_row_idx_node[nnz_node] = col_idx[j] - mat_cluster_s; nnz_node++; } } } } // Allocate dbl_buffer and assign corresponding pointers static void H2ERI_exchange_workbuf_alloc_dbl_buffer( H2ERI_p h2eri, Kmat_workbuf_p workbuf, const int N, const int Q, const int num_M, const int *M_list ) { int n_node = h2eri->h2pack->n_node; int *shell_bf_sidx = h2eri->shell_bf_sidx; int *mat_cluster = h2eri->h2pack->mat_cluster; int N_nbf = shell_bf_sidx[N + 1] - shell_bf_sidx[N]; int Q_nbf = shell_bf_sidx[Q + 1] - shell_bf_sidx[Q]; int nvec = N_nbf * Q_nbf; workbuf->nvec = nvec; // Calculate vec_in_size int vec_in_nrow = 0; int num_P = workbuf->P_list_len; int *P_list = workbuf->P_list; for (int i = 0; i < num_P; i++) { int P = P_list[i]; int P_nbf = shell_bf_sidx[P + 1] - shell_bf_sidx[P]; vec_in_nrow += P_nbf * Q_nbf; } int vec_in_size = vec_in_nrow * nvec; // Calculate vec_out_size int vec_out_nrow = 0; for (int i = 0; i < num_M; i++) { int M = M_list[i]; int M_nbf = shell_bf_sidx[M + 1] - shell_bf_sidx[M]; vec_out_nrow += M_nbf * N_nbf; } int vec_out_size = vec_out_nrow * nvec; // nv{i, o}_nnz are of the same sizes as vec_{in, out} int nvi_nnz_size = vec_in_size; int nvo_nnz_size = vec_out_size; // Set up nvi_nnz_sidx and nvo_nnz_sidx int *nvi_nnz_sidx = workbuf->nvi_nnz_sidx; int *nvo_nnz_sidx = workbuf->nvo_nnz_sidx; int *nvi_nnz_row_sidx = workbuf->nvi_nnz_row_sidx; int *nvo_nnz_row_sidx = workbuf->nvo_nnz_row_sidx; for (int i = 0; i <= h2eri->h2pack->n_node; i++) { nvi_nnz_sidx[i] = nvi_nnz_row_sidx[i] * nvec; nvo_nnz_sidx[i] = nvo_nnz_row_sidx[i] * nvec; } // Calculate y0_size int y0_size = 0; int *col_node_flag = workbuf->col_node_flag; int *y0_sidx = workbuf->y0_sidx; H2P_dense_mat_p *U = h2eri->h2pack->U; y0_sidx[0] = 0; for (int i = 0; i < n_node; i++) { if (col_node_flag[i] == 1) y0_size += U[i]->ncol * nvec; y0_sidx[i + 1] = y0_size; } // Calculate y1_size int y1_size = 0; int *row_node_flag = workbuf->row_node_flag; int *y1_sidx = workbuf->y1_sidx; for (int i = 0; i < n_node; i++) { if (row_node_flag[i] == 1) y1_size += U[i]->ncol * nvec; y1_sidx[i + 1] = y1_size; } int tmp_K_size = h2eri->max_shell_nbf; // Allocate double buffer and assign pointers int dbl_buffer_size = 0; dbl_buffer_size += vec_in_size + vec_out_size; dbl_buffer_size += nvi_nnz_size + nvo_nnz_size; dbl_buffer_size += y0_size + y1_size; dbl_buffer_size += tmp_K_size; if (dbl_buffer_size > workbuf->dbl_buffer_size) { free(workbuf->dbl_buffer); workbuf->dbl_buffer_size = dbl_buffer_size; workbuf->dbl_buffer = (double *) malloc(sizeof(double) * dbl_buffer_size); ASSERT_PRINTF(workbuf->dbl_buffer != NULL, "Failed to allocate dbl_buffer of size %d\n", 
dbl_buffer_size); } memset(workbuf->dbl_buffer, 0, sizeof(double) * dbl_buffer_size); workbuf->vec_in = workbuf->dbl_buffer; workbuf->vec_out = workbuf->vec_in + vec_in_size; workbuf->nvi_nnz = workbuf->vec_out + vec_out_size; workbuf->nvo_nnz = workbuf->nvi_nnz + nvi_nnz_size; workbuf->y0 = workbuf->nvo_nnz + nvo_nnz_size; workbuf->y1 = workbuf->y0 + y0_size; workbuf->tmp_K = workbuf->y1 + y1_size; } // Gather H2 partial matmul input vectors from density matrix static void H2ERI_build_exchange_gather_vec_in( H2ERI_p h2eri, Kmat_workbuf_p workbuf, const int N, const int Q, const double *den_mat ) { int *shell_bf_sidx = h2eri->shell_bf_sidx; int N_bf_sidx = shell_bf_sidx[N]; int N_nbf = shell_bf_sidx[N + 1] - shell_bf_sidx[N]; int Q_nbf = shell_bf_sidx[Q + 1] - shell_bf_sidx[Q]; int nvec = N_nbf * Q_nbf; // From density matrix to vec_in int P_cut = workbuf->P_cut; int num_bf = h2eri->num_bf; int num_P = workbuf->P_list_len; int curr_row = 0; int *P_list = workbuf->P_list; int *col_idx_ipmt = workbuf->col_idx_ipmt; double *vec_in = workbuf->vec_in; for (int i = 0; i < num_P; i++) { int P = P_list[i]; int P_bf_sidx = shell_bf_sidx[P]; int P_bf_eidx = shell_bf_sidx[P + 1]; int P_nbf = P_bf_eidx - P_bf_sidx; const double *den_mat_ptr = den_mat + P_bf_sidx * num_bf + N_bf_sidx; for (int j = 0; j < Q_nbf; j++) { int row_idx_s, row_idx_e, row_idx_inc, col_idx_s; if (i < P_cut) { row_idx_s = curr_row + j * P_nbf + 0; row_idx_e = curr_row + j * P_nbf + P_nbf; row_idx_inc = 1; } else { row_idx_s = curr_row + j + Q_nbf * 0; row_idx_e = curr_row + j + Q_nbf * P_nbf; row_idx_inc = Q_nbf; } col_idx_s = j * N_nbf + 0; int k = 0; for (int row_idx0 = row_idx_s; row_idx0 < row_idx_e; row_idx0 += row_idx_inc) { int row_idx1 = col_idx_ipmt[row_idx0]; double *dst = vec_in + row_idx1 * nvec + col_idx_s; const double *src = den_mat_ptr + k * num_bf; memcpy(dst, src, sizeof(double) * N_nbf); k++; } } // End of j loop curr_row += P_nbf * Q_nbf; } // End of i loop // Copy data from vec_in to nvi_nnz int col_idx_len = workbuf->col_idx_len; int col_n_leaf_node = workbuf->col_n_leaf_node; int *mat_cluster = h2eri->h2pack->mat_cluster; int *col_leaf_nodes = workbuf->col_leaf_nodes; int *col_idx = workbuf->col_idx; int *nvi_nnz_sidx = workbuf->nvi_nnz_sidx; double *nvi_nnz = workbuf->nvi_nnz; for (int i = 0; i < col_n_leaf_node; i++) { int node = col_leaf_nodes[i]; int mat_cluster_s = mat_cluster[2 * node]; int mat_cluster_e = mat_cluster[2 * node + 1]; int nnz_node = 0; double *nvi_nnz_node = nvi_nnz + nvi_nnz_sidx[node]; for (int j = 0; j < col_idx_len; j++) { if ((mat_cluster_s <= col_idx[j]) && (col_idx[j] <= mat_cluster_e)) { double *dst = nvi_nnz_node + nnz_node * nvec; double *src = vec_in + j * nvec; memcpy(dst, src, sizeof(double) * nvec); nnz_node++; } } } } // Scatter H2 partial matmul output vectors to exchange matrix static void H2ERI_build_exchange_scatter_vec_out( H2ERI_p h2eri, Kmat_workbuf_p workbuf, const int N, const int Q, const int num_M, const int *M_list, double *K_mat ) { int *shell_bf_sidx = h2eri->shell_bf_sidx; int N_nbf = shell_bf_sidx[N + 1] - shell_bf_sidx[N]; int Q_nbf = shell_bf_sidx[Q + 1] - shell_bf_sidx[Q]; int nvec = N_nbf * Q_nbf; // Copy data from nvo_nnz to vec_out int row_n_leaf_node = workbuf->row_n_leaf_node; int *row_leaf_nodes = workbuf->row_leaf_nodes; int *vec_out_idx = workbuf->vec_out_idx; int *vec_out_sidx = workbuf->vec_out_sidx; int *nvo_nnz_sidx = workbuf->nvo_nnz_sidx; double *vec_out = workbuf->vec_out; double *nvo_nnz = workbuf->nvo_nnz; for (int i = 0; i < 
row_n_leaf_node; i++) { int node = row_leaf_nodes[i]; int vec_out_idx_len = vec_out_sidx[node + 1] - vec_out_sidx[node]; int *vec_out_idx_i = vec_out_idx + vec_out_sidx[node]; double *nvo_nnz_node = nvo_nnz + nvo_nnz_sidx[node]; for (int j = 0; j < vec_out_idx_len; j++) { double *dst = vec_out + vec_out_idx_i[j] * nvec; double *src = nvo_nnz_node + j * nvec; memcpy(dst, src, sizeof(double) * nvec); } } // From vec_out to K_mat int num_bf = h2eri->num_bf; int M_cut = workbuf->M_cut; int *MN_bfp_sidx = workbuf->MN_bfp_sidx; double *tmp_K = workbuf->tmp_K; for (int i = 0; i < num_M; i++) { int M = M_list[i]; int M_bf_sidx = shell_bf_sidx[M]; int M_bf_eidx = shell_bf_sidx[M + 1]; int M_nbf = M_bf_eidx - M_bf_sidx; int row_idx0_s, row_idx0_e, s0, s1, s2; if (i < M_cut) { row_idx0_s = MN_bfp_sidx[i]; row_idx0_e = MN_bfp_sidx[i] + M_nbf; s0 = 1; s1 = 0; s2 = M_nbf; } else { row_idx0_s = MN_bfp_sidx[i]; row_idx0_e = MN_bfp_sidx[i] + M_nbf * N_nbf; s0 = N_nbf; s1 = 0; s2 = 1; } for (int j = 0; j < Q_nbf; j++) { memset(tmp_K, 0, sizeof(double) * M_nbf); for (int k = 0; k < N_nbf; k++) { int row_idx0_s_k = row_idx0_s + (k - s1) * s2; int vec_out_col = j * N_nbf + k; for (int l = 0; l < M_nbf; l++) { int vec_out_row_offset = (row_idx0_s_k + l * s0) * nvec; tmp_K[l] += vec_out[vec_out_row_offset + vec_out_col]; } } // End of k loop for (int l = 0; l < M_nbf; l++) { int K_mat_col = shell_bf_sidx[Q] + j; int K_mat_row_offset = (M_bf_sidx + l) * num_bf; double *K_mat_ptr = K_mat + K_mat_row_offset + K_mat_col; atomic_add_f64(K_mat_ptr, tmp_K[l]); } } // End of j loop } // End of i loop } // Build dlist according to density matrix static void H2ERI_build_exchange_dlist(H2ERI_p h2eri, const double *den_mat) { int num_bf = h2eri->num_bf; int nshell = h2eri->nshell; if (h2eri->dlist_sidx == NULL) { h2eri->dlist_sidx = (int *) malloc(sizeof(int) * (nshell + 1)); ASSERT_PRINTF(h2eri->dlist_sidx != NULL, "Failed to allocate dlist_sidx of size %d\n", nshell + 1); } int *dlist_sidx = h2eri->dlist_sidx; int *shell_bf_sidx = h2eri->shell_bf_sidx; int *dlist0 = (int *) malloc(sizeof(int) * nshell * nshell); int *dlist_cnt = (int *) malloc(sizeof(int) * nshell); ASSERT_PRINTF(dlist0 != NULL, "Failed to allocate dlist0 of size %d\n", nshell * nshell); ASSERT_PRINTF(dlist_cnt != NULL, "Failed to allocate dlist_cnt of size %d\n", nshell); const double DTOL = 1e-10; #pragma omp parallel for schedule(dynamic) for (int i = 0; i < nshell; i++) { int srow = shell_bf_sidx[i]; int erow = shell_bf_sidx[i + 1]; int cnt = 0; int *dlist0_i = dlist0 + i * nshell; for (int j = 0; j < nshell; j++) { int scol = shell_bf_sidx[j]; int ecol = shell_bf_sidx[j + 1]; int flag = 0; for (int irow = srow; irow < erow; irow++) { const double *den_mat_irow = den_mat + irow * num_bf; for (int icol = scol; icol < ecol; icol++) { if (fabs(den_mat_irow[icol]) > DTOL) { flag = 1; break; } } if (flag == 1) break; } if (flag == 1) { dlist0_i[cnt] = j; cnt++; } } // End of j loop dlist_cnt[i] = cnt; } // End of i loop dlist_sidx[0] = 0; for (int i = 1; i <= nshell; i++) dlist_sidx[i] = dlist_sidx[i - 1] + dlist_cnt[i - 1]; int dlist_size = dlist_sidx[nshell]; free(h2eri->dlist); int *dlist = (int *) malloc(sizeof(int) * dlist_size); ASSERT_PRINTF(dlist != NULL, "Failed to allocate dlist of size %d\n", dlist_size); h2eri->dlist = dlist; for (int i = 0; i < nshell; i++) memcpy(dlist + dlist_sidx[i], dlist0 + i * nshell, sizeof(int) * dlist_cnt[i]); free(dlist_cnt); free(dlist0); } // Copy a sub-matrix of specified rows and columns static void 
H2ERI_copy_submatrix( const double *mat, const int ldm, const int ncol, double *submat, const int lds, const int submat_nrow, const int *submat_row_idx, const int submat_ncol, const int *submat_col_idx ) { if (ncol > submat_ncol) { for (int i = 0; i < submat_nrow; i++) { const double *mat_row_i = mat + submat_row_idx[i] * ldm; double *submat_row_i = submat + i * lds; for (int j = 0; j < submat_ncol; j++) submat_row_i[j] = mat_row_i[submat_col_idx[j]]; } } else { size_t row_bytes = sizeof(double) * ncol; for (int i = 0; i < submat_nrow; i++) { const double *mat_row_i = mat + submat_row_idx[i] * ldm; double *submat_row_i = submat + i * lds; memcpy(submat_row_i, mat_row_i, row_bytes); } } } // Perform matmul for a sub-matrix of B or D block blk which might // be a dense block or a low-rank approximation blk = U * V static void H2ERI_BD_blk_submat_matmul( const int trans_blk, H2P_dense_mat_p blk, H2P_dense_mat_p tmp_mat, int *tmp_idx, const double *mat_in, double *mat_out, const int nvec, const int submat_nrow, const int *submat_row_idx, const int submat_ncol, const int *submat_col_idx ) { if (blk->ld > 0) { if (trans_blk == 0) { H2P_dense_mat_resize(tmp_mat, submat_nrow, submat_ncol); H2ERI_copy_submatrix( blk->data, blk->ld, blk->ncol, tmp_mat->data, tmp_mat->ld, submat_nrow, submat_row_idx, submat_ncol, submat_col_idx ); CBLAS_GEMM( CblasRowMajor, CblasNoTrans, CblasNoTrans, submat_nrow, nvec, submat_ncol, 1.0, tmp_mat->data, tmp_mat->ld, mat_in, nvec, 1.0, mat_out, nvec ); } else { H2P_dense_mat_resize(tmp_mat, submat_ncol, submat_nrow); H2ERI_copy_submatrix( blk->data, blk->ld, blk->ncol, tmp_mat->data, tmp_mat->ld, submat_ncol, submat_col_idx, submat_nrow, submat_row_idx ); CBLAS_GEMM( CblasRowMajor, CblasTrans, CblasNoTrans, submat_nrow, nvec, submat_ncol, 1.0, tmp_mat->data, tmp_mat->ld, mat_in, nvec, 1.0, mat_out, nvec ); } } else { int blk_rank = -blk->ld; double *U_mat = blk->data; double *VT_mat = U_mat + blk->nrow * blk_rank; for (int i = 0; i < blk_rank; i++) tmp_idx[i] = i; // U: blk->nrow * blk_rank // V: blk_rank * blk->ncol // Note: V^T instead of V is stored if (trans_blk == 0) { // mat_out = (U * V) * mat_in = U * (V * mat_in) int tmp_mat_size = blk_rank * nvec; // tmp_v tmp_mat_size += blk_rank * submat_ncol; // Sub-matrix of V tmp_mat_size += submat_nrow * blk_rank; // Sub-matrix of U H2P_dense_mat_resize(tmp_mat, blk_rank, nvec); double *tmp_v = tmp_mat->data; double *VT_submat = tmp_v + blk_rank * nvec; double *U_submat = VT_submat + blk_rank * submat_ncol; H2ERI_copy_submatrix( VT_mat, blk_rank, blk_rank, VT_submat, blk_rank, submat_ncol, submat_col_idx, blk_rank, tmp_idx ); H2ERI_copy_submatrix( U_mat, blk_rank, blk_rank, U_submat, blk_rank, submat_nrow, submat_row_idx, blk_rank, tmp_idx ); CBLAS_GEMM( CblasRowMajor, CblasTrans, CblasNoTrans, blk_rank, nvec, submat_ncol, 1.0, VT_submat, blk_rank, mat_in, nvec, 0.0, tmp_v, nvec ); CBLAS_GEMM( CblasRowMajor, CblasNoTrans, CblasNoTrans, submat_nrow, nvec, blk_rank, 1.0, U_submat, blk_rank, tmp_v, nvec, 1.0, mat_out, nvec ); } else { // mat_out = (U * V)^T * mat_in = V^T * (U^T * mat_in) int tmp_mat_size = blk_rank * nvec; // tmp_v tmp_mat_size += submat_ncol * blk_rank; // Sub-matrix of U tmp_mat_size += blk_rank * submat_nrow; // Sub-matrix of V H2P_dense_mat_resize(tmp_mat, blk_rank, nvec); double *tmp_v = tmp_mat->data; double *U_submat = tmp_v + blk_rank * nvec; double *VT_submat = U_submat + submat_ncol * blk_rank; H2ERI_copy_submatrix( U_mat, blk_rank, blk_rank, U_submat, blk_rank, submat_ncol, submat_col_idx, 
blk_rank, tmp_idx ); CBLAS_GEMM( CblasRowMajor, CblasTrans, CblasNoTrans, blk_rank, nvec, submat_ncol, 1.0, U_submat, blk_rank, mat_in, nvec, 0.0, tmp_v, nvec ); H2ERI_copy_submatrix( VT_mat, blk_rank, blk_rank, VT_submat, blk_rank, submat_nrow, submat_row_idx, blk_rank, tmp_idx ); CBLAS_GEMM( CblasRowMajor, CblasNoTrans, CblasNoTrans, submat_nrow, nvec, blk_rank, 1.0, VT_submat, blk_rank, tmp_v, nvec, 1.0, mat_out, nvec ); } } } // Perform matmul for a B or D block blk which might be a dense block // or a low-rank approximation blk = U * V static void H2ERI_BD_blk_matmul( const int trans_blk, H2P_dense_mat_p blk, H2P_dense_mat_p tmp_v, const double *mat_in, double *mat_out, const int nvec ) { if (blk->ld > 0) { if (trans_blk == 0) { CBLAS_GEMM( CblasRowMajor, CblasNoTrans, CblasNoTrans, blk->nrow, nvec, blk->ncol, 1.0, blk->data, blk->ld, mat_in, nvec, 1.0, mat_out, nvec ); } else { CBLAS_GEMM( CblasRowMajor, CblasTrans, CblasNoTrans, blk->ncol, nvec, blk->nrow, 1.0, blk->data, blk->ld, mat_in, nvec, 1.0, mat_out, nvec ); } } else { int blk_rank = -blk->ld; double *U_mat = blk->data; double *VT_mat = U_mat + blk->nrow * blk_rank; // U: blk->nrow * blk_rank // V: blk_rank * blk->ncol // Note: V^T instead of V is stored H2P_dense_mat_resize(tmp_v, blk_rank, nvec); if (trans_blk == 0) { // mat_out = (U * V) * mat_in = U * (V * mat_in) CBLAS_GEMM( CblasRowMajor, CblasTrans, CblasNoTrans, blk_rank, nvec, blk->ncol, 1.0, VT_mat, blk_rank, mat_in, nvec, 0.0, tmp_v->data, nvec ); CBLAS_GEMM( CblasRowMajor, CblasNoTrans, CblasNoTrans, blk->nrow, nvec, blk_rank, 1.0, U_mat, blk_rank, tmp_v->data, nvec, 1.0, mat_out, nvec ); } else { // mat_out = (U * V)^T * mat_in = V^T * (U^T * mat_in) CBLAS_GEMM( CblasRowMajor, CblasTrans, CblasNoTrans, blk_rank, nvec, blk->nrow, 1.0, U_mat, blk_rank, mat_in, nvec, 0.0, tmp_v->data, nvec ); CBLAS_GEMM( CblasRowMajor, CblasNoTrans, CblasNoTrans, blk->ncol, nvec, blk_rank, 1.0, VT_mat, blk_rank, tmp_v->data, nvec, 1.0, mat_out, nvec ); } } } static void H2ERI_build_exchange_H2_matmul_partial(H2ERI_p h2eri, Kmat_workbuf_p workbuf, const int tid) { H2Pack_p h2pack = h2eri->h2pack; int n_node = h2pack->n_node; int max_child = h2pack->max_child; int n_leaf_node = h2pack->n_leaf_node; int n_r_adm_pair = h2pack->n_r_adm_pair; int n_r_inadm_pair = h2pack->n_r_inadm_pair; int *children = h2pack->children; int *n_child = h2pack->n_child; int *level_n_node = h2pack->level_n_node; int *level_nodes = h2pack->level_nodes; int *node_level = h2pack->node_level; int *leaf_nodes = h2pack->height_nodes; int *r_adm_pairs = h2pack->r_adm_pairs; int *r_inadm_pairs = h2pack->r_inadm_pairs; H2P_dense_mat_p *U = h2pack->U; H2P_dense_mat_p *c_B_blks = h2eri->c_B_blks; H2P_dense_mat_p *c_D_blks = h2eri->c_D_blks; H2P_dense_mat_p tmp_mat = h2pack->tb[tid]->mat0; H2P_int_vec_p tmp_idx0 = h2pack->tb[tid]->idx0; H2P_int_vec_p tmp_idx1 = h2pack->tb[tid]->idx1; int *node_adm_pairs = h2eri->node_adm_pairs; int *node_adm_pairs_sidx = h2eri->node_adm_pairs_sidx; int *node_inadm_pairs = h2eri->node_inadm_pairs; int *node_inadm_pairs_sidx = h2eri->node_inadm_pairs_sidx; int nvec = workbuf->nvec; int row_max_level = workbuf->row_max_level; int col_max_level = workbuf->col_max_level; int *row_node_flag = workbuf->row_node_flag; int *col_node_flag = workbuf->col_node_flag; int *nvi_nnz_row_idx = workbuf->nvi_nnz_row_idx; int *nvo_nnz_row_idx = workbuf->nvo_nnz_row_idx; int *nvi_nnz_row_sidx = workbuf->nvi_nnz_row_sidx; int *nvo_nnz_row_sidx = workbuf->nvo_nnz_row_sidx; int *nvi_nnz_sidx = 
workbuf->nvi_nnz_sidx; int *nvo_nnz_sidx = workbuf->nvo_nnz_sidx; int *y0_sidx = workbuf->y0_sidx; int *y1_sidx = workbuf->y1_sidx; int *tmp_idx2 = workbuf->tmp_arr; double *nvi_nnz = workbuf->nvi_nnz; double *nvo_nnz = workbuf->nvo_nnz; double *y0 = workbuf->y0; double *y1 = workbuf->y1; int *B_p2i_rowptr = h2pack->B_p2i_rowptr; int *B_p2i_colidx = h2pack->B_p2i_colidx; int *B_p2i_val = h2pack->B_p2i_val; int *D_p2i_rowptr = h2pack->D_p2i_rowptr; int *D_p2i_colidx = h2pack->D_p2i_colidx; int *D_p2i_val = h2pack->D_p2i_val; double st, et; double *timers = workbuf->timers; // y0, y1, vec_out have been set to zero in H2ERI_exchange_workbuf_alloc_dbl_buffer() // 1. Find forward & backward transformation minimal levels st = get_wtime_sec(); int fwd_minlvl = 19241112, bwd_minlvl = 19241112; for (int node0 = 0; node0 < n_node; node0++) { if (row_node_flag[node0] == 0) continue; int node0_n_adm_pair = node_adm_pairs_sidx[node0 + 1] - node_adm_pairs_sidx[node0]; int *node0_adm_pairs = node_adm_pairs + node_adm_pairs_sidx[node0]; int level0 = node_level[node0]; int cnt = 0; for (int j = 0; j < node0_n_adm_pair; j++) { int node1 = node0_adm_pairs[j]; int level1 = node_level[node1]; if (col_node_flag[node1] == 0) continue; if (level1 < fwd_minlvl) fwd_minlvl = level1; cnt++; } if ((cnt > 0) && (level0 < bwd_minlvl)) bwd_minlvl = level0; } if (fwd_minlvl < h2pack->min_adm_level) fwd_minlvl = h2pack->min_adm_level; if (bwd_minlvl < h2pack->min_adm_level) bwd_minlvl = h2pack->min_adm_level; int only_Dij = 0; if (col_max_level < fwd_minlvl) only_Dij = 1; if (row_max_level < bwd_minlvl) only_Dij = 1; et = get_wtime_sec(); timers[BUILD_K_MM_FWD_TIMER_IDX] += et - st; // 2. Forward transformation st = get_wtime_sec(); for (int i = col_max_level; i >= fwd_minlvl; i--) { if (only_Dij) continue; int *level_i_nodes = level_nodes + i * n_leaf_node; int level_i_n_node = level_n_node[i]; for (int j = 0; j < level_i_n_node; j++) { int node = level_i_nodes[j]; if (col_node_flag[node] == 0) continue; int child_cnt = 0; int U_srow = 0; int n_child_node = n_child[node]; H2P_dense_mat_p U_node = U[node]; double *y0_node = y0 + y0_sidx[node]; int *node_children = children + node * max_child; for (int k = 0; k < n_child_node; k++) { int child_k = node_children[k]; int y0_k_nrow = U[child_k]->ncol; double *y0_k = y0 + y0_sidx[child_k]; double *U_node_k = U_node->data + U_srow * U_node->ld; if (col_node_flag[child_k]) { CBLAS_GEMM( CblasRowMajor, CblasTrans, CblasNoTrans, U_node->ncol, nvec, y0_k_nrow, 1.0, U_node_k, U_node->ld, y0_k, nvec, 1.0, y0_node, nvec ); child_cnt++; } U_srow += y0_k_nrow; } // End of k loop if (child_cnt == 0) { double *B = nvi_nnz + nvi_nnz_sidx[node]; int num_nnz_row = nvi_nnz_row_sidx[node + 1] - nvi_nnz_row_sidx[node]; int *nnz_row = nvi_nnz_row_idx + nvi_nnz_row_sidx[node]; H2P_dense_mat_resize(tmp_mat, num_nnz_row, U_node->ncol); for (int l = 0; l < num_nnz_row; l++) { double *src = U_node->data + nnz_row[l] * U_node->ncol; double *dst = tmp_mat->data + l * U_node->ncol; memcpy(dst, src, sizeof(double) * U_node->ncol); } CBLAS_GEMM( CblasRowMajor, CblasTrans, CblasNoTrans, tmp_mat->ncol, nvec, num_nnz_row, 1.0, tmp_mat->data, tmp_mat->ld, B, nvec, 1.0, y0_node, nvec ); } } // End of j loop } // End of i loop et = get_wtime_sec(); timers[BUILD_K_MM_FWD_TIMER_IDX] += et - st; // 3. 
Intermediate multiplication st = get_wtime_sec(); for (int node0 = 0; node0 < n_node; node0++) { if (only_Dij) continue; if (row_node_flag[node0] == 0) continue; int node0_n_adm_pair = node_adm_pairs_sidx[node0 + 1] - node_adm_pairs_sidx[node0]; int *node0_adm_pairs = node_adm_pairs + node_adm_pairs_sidx[node0]; int node0_num_nnz_row = nvo_nnz_row_sidx[node0 + 1] - nvo_nnz_row_sidx[node0]; int *node0_nnz_row_idx = nvo_nnz_row_idx + nvo_nnz_row_sidx[node0]; int y1_node0_nrow = (y1_sidx[node0 + 1] - y1_sidx[node0]) / nvec; H2P_int_vec_set_capacity(tmp_idx0, y1_node0_nrow); for (int k = 0; k < y1_node0_nrow; k++) tmp_idx0->data[k] = k; tmp_idx0->length = y1_node0_nrow; for (int j = 0; j < node0_n_adm_pair; j++) { int node1 = node0_adm_pairs[j]; if (col_node_flag[node1] == 0) continue; int node1_num_nnz_row = nvi_nnz_row_sidx[node1 + 1] - nvi_nnz_row_sidx[node1]; int *node1_nnz_row_idx = nvi_nnz_row_idx + nvi_nnz_row_sidx[node1]; int y0_node1_nrow = (y0_sidx[node1 + 1] - y0_sidx[node1]) / nvec; H2P_int_vec_set_capacity(tmp_idx1, y0_node1_nrow); for (int k = 0; k < y0_node1_nrow; k++) tmp_idx1->data[k] = k; tmp_idx1->length = y0_node1_nrow; int pair_idx_ij = H2P_get_int_CSR_elem(B_p2i_rowptr, B_p2i_colidx, B_p2i_val, node0, node1); ASSERT_PRINTF(pair_idx_ij != 0, "Cannot find Bij for i = %d, j = %d\n", node0, node1); int trans_blk; H2P_dense_mat_p Bi; if (pair_idx_ij > 0) { trans_blk = 0; Bi = c_B_blks[pair_idx_ij - 1]; } else { trans_blk = 1; Bi = c_B_blks[-pair_idx_ij - 1]; } int level0 = node_level[node0]; int level1 = node_level[node1]; // (1) Two nodes are of the same level, compress on both sides if (level0 == level1) { double *y0_node1 = y0 + y0_sidx[node1]; double *y1_node0 = y1 + y1_sidx[node0]; H2ERI_BD_blk_matmul(trans_blk, Bi, tmp_mat, y0_node1, y1_node0, nvec); } // (2) node1 is a leaf node and its level is higher than node0's level, // only compress on node0's side if (level0 > level1) { double *node1_vec_in = nvi_nnz + nvi_nnz_sidx[node1]; double *y1_node0 = y1 + y1_sidx[node0]; H2ERI_BD_blk_submat_matmul( trans_blk, Bi, tmp_mat, tmp_idx2, node1_vec_in, y1_node0, nvec, tmp_idx0->length, tmp_idx0->data, node1_num_nnz_row, node1_nnz_row_idx ); } // (3) node0 is a leaf node and its level is higher than node1's level, // only compress on node1's side if (level0 < level1) { double *y0_node1 = y0 + y0_sidx[node1]; double *node0_vec_out = nvo_nnz + nvo_nnz_sidx[node0]; H2ERI_BD_blk_submat_matmul( trans_blk, Bi, tmp_mat, tmp_idx2, y0_node1, node0_vec_out, nvec, node0_num_nnz_row, node0_nnz_row_idx, tmp_idx1->length, tmp_idx1->data ); } } // End of j loop } // End of node0 loop et = get_wtime_sec(); timers[BUILD_K_MM_MID_TIMER_IDX] += et - st; // 4. 
Backward transformation st = get_wtime_sec(); for (int i = bwd_minlvl; i <= row_max_level; i++) { if (only_Dij) continue; int *level_i_nodes = level_nodes + i * n_leaf_node; int level_i_n_node = level_n_node[i]; for (int j = 0; j < level_i_n_node; j++) { int node = level_i_nodes[j]; if (row_node_flag[node] == 0) continue; int child_cnt = 0; int U_srow = 0; int n_child_node = n_child[node]; H2P_dense_mat_p U_node = U[node]; double *y1_node = y1 + y1_sidx[node]; int *node_children = children + node * max_child; for (int k = 0; k < n_child_node; k++) { int child_k = node_children[k]; int y1_k_nrow = U[child_k]->ncol; double *y1_k = y1 + y1_sidx[child_k]; double *U_node_k = U_node->data + U_srow * U_node->ld; if (row_node_flag[child_k]) { CBLAS_GEMM( CblasRowMajor, CblasNoTrans, CblasNoTrans, y1_k_nrow, nvec, U_node->ncol, 1.0, U_node_k, U_node->ld, y1_node, nvec, 1.0, y1_k, nvec ); child_cnt++; } U_srow += y1_k_nrow; } // End of k loop if (child_cnt == 0) { double *C = nvo_nnz + nvo_nnz_sidx[node]; int num_nnz_row = nvo_nnz_row_sidx[node + 1] - nvo_nnz_row_sidx[node]; int *nnz_row = nvo_nnz_row_idx + nvo_nnz_row_sidx[node]; H2P_dense_mat_resize(tmp_mat, num_nnz_row, U_node->ncol); for (int l = 0; l < num_nnz_row; l++) { double *src = U_node->data + nnz_row[l] * U_node->ncol; double *dst = tmp_mat->data + l * U_node->ncol; memcpy(dst, src, sizeof(double) * U_node->ncol); } CBLAS_GEMM( CblasRowMajor, CblasNoTrans, CblasNoTrans, num_nnz_row, nvec, tmp_mat->ncol, 1.0, tmp_mat->data, tmp_mat->ld, y1_node, nvec, 1.0, C, nvec ); } } // End of j loop } // End of i loop et = get_wtime_sec(); timers[BUILD_K_MM_BWD_TIMER_IDX] += et - st; // 5. Dense multiplication st = get_wtime_sec(); for (int node0 = 0; node0 < n_node; node0++) { if (row_node_flag[node0] == 0) continue; double *node0_vec_out = nvo_nnz + nvo_nnz_sidx[node0]; int node0_n_inadm_pair = node_inadm_pairs_sidx[node0 + 1] - node_inadm_pairs_sidx[node0]; int *node0_inadm_pairs = node_inadm_pairs + node_inadm_pairs_sidx[node0]; int node0_num_nnz_row = nvo_nnz_row_sidx[node0 + 1] - nvo_nnz_row_sidx[node0]; int *node0_nnz_row_idx = nvo_nnz_row_idx + nvo_nnz_row_sidx[node0]; for (int j = 0; j < node0_n_inadm_pair; j++) { int node1 = node0_inadm_pairs[j]; if (col_node_flag[node1] == 0) continue; double *node1_vec_in = nvi_nnz + nvi_nnz_sidx[node1]; int node1_num_nnz_row = nvi_nnz_row_sidx[node1 + 1] - nvi_nnz_row_sidx[node1]; int *node1_nnz_row_idx = nvi_nnz_row_idx + nvi_nnz_row_sidx[node1]; int pair_idx_ij = H2P_get_int_CSR_elem(D_p2i_rowptr, D_p2i_colidx, D_p2i_val, node0, node1); ASSERT_PRINTF(pair_idx_ij != 0, "Cannot find Dij for i = %d, j = %d\n", node0, node1); int trans_blk; H2P_dense_mat_p Di; if (pair_idx_ij > 0) { trans_blk = 0; Di = c_D_blks[pair_idx_ij - 1]; } else { trans_blk = 1; Di = c_D_blks[-pair_idx_ij - 1]; } H2ERI_BD_blk_submat_matmul( trans_blk, Di, tmp_mat, tmp_idx2, node1_vec_in, node0_vec_out, nvec, node0_num_nnz_row, node0_nnz_row_idx, node1_num_nnz_row, node1_nnz_row_idx ); } // End of j loop } // End of node0 loop et = get_wtime_sec(); timers[BUILD_K_MM_DEN_TIMER_IDX] += et - st; } // Build the exchange matrix with the density matrix and H2 representation of the ERI tensor void H2ERI_build_exchange(H2ERI_p h2eri, const double *den_mat, double *K_mat) { ASSERT_PRINTF(h2eri->h2pack->BD_JIT == 0, "H2ERI_build_exchange does not support BD JIT build\n"); H2ERI_build_exchange_dlist(h2eri, den_mat); int num_bf = h2eri->num_bf; int nshell = h2eri->nshell; int n_thread = h2eri->h2pack->n_thread; int *plist = h2eri->plist; int 
*plist_idx = h2eri->plist_idx; int *plist_sidx = h2eri->plist_sidx; int *dlist = h2eri->dlist; int *dlist_sidx = h2eri->dlist_sidx; BLAS_SET_NUM_THREADS(1); #pragma omp parallel for for (int i = 0; i < num_bf * num_bf; i++) K_mat[i] = 0; Kmat_workbuf_p *thread_Kmat_workbuf = (Kmat_workbuf_p *) h2eri->thread_Kmat_workbuf; #pragma omp parallel num_threads(n_thread) { int tid = omp_get_thread_num(); Kmat_workbuf_p workbuf = thread_Kmat_workbuf[tid]; double st, et; double *timers = workbuf->timers; memset(timers, 0, sizeof(double) * 5); #pragma omp for schedule(dynamic) for (int N = 0; N < nshell; N++) { int num_M = plist_sidx[N + 1] - plist_sidx[N]; int *M_list = plist + plist_sidx[N]; int *MN_pair_idx = plist_idx + plist_sidx[N]; st = get_wtime_sec(); H2ERI_exchange_workbuf_update_MN_list(h2eri, workbuf, N, num_M, M_list, MN_pair_idx); et = get_wtime_sec(); timers[BUILD_K_AUX_TIMER_IDX] += et - st; int num_D = dlist_sidx[N + 1] - dlist_sidx[N]; int *D_list = dlist + dlist_sidx[N]; for (int Q = 0; Q < nshell; Q++) { int num_P0 = plist_sidx[Q + 1] - plist_sidx[Q]; int *P_list0 = plist + plist_sidx[Q]; int *PQ_pair_idx = plist_idx + plist_sidx[Q]; st = get_wtime_sec(); H2ERI_exchange_workbuf_update_PQ_list( h2eri, workbuf, Q, num_P0, P_list0, PQ_pair_idx, num_D, D_list ); et = get_wtime_sec(); timers[BUILD_K_AUX_TIMER_IDX] += et - st; st = get_wtime_sec(); H2ERI_exchange_workbuf_alloc_dbl_buffer(h2eri, workbuf, N, Q, num_M, M_list); et = get_wtime_sec(); timers[BUILD_K_AUX_TIMER_IDX] += et - st; st = get_wtime_sec(); H2ERI_build_exchange_gather_vec_in(h2eri, workbuf, N, Q, den_mat); et = get_wtime_sec(); timers[BUILD_K_AUX_TIMER_IDX] += et - st; H2ERI_build_exchange_H2_matmul_partial(h2eri, workbuf, tid); st = get_wtime_sec(); H2ERI_build_exchange_scatter_vec_out(h2eri, workbuf, N, Q, num_M, M_list, K_mat); et = get_wtime_sec(); timers[BUILD_K_AUX_TIMER_IDX] += et - st; } // End of Q loop } // End of N loop } // End of "#pragma omp parallel" BLAS_SET_NUM_THREADS(n_thread); double build_K_timers[5] = {0, 0, 0, 0, 0}; for (int i = 0; i < 5; i++) { for (int j = 0; j < n_thread; j++) { double timer_ij = thread_Kmat_workbuf[j]->timers[i]; if (timer_ij > build_K_timers[i]) build_K_timers[i] = timer_ij; } } double *h2pack_timers = h2eri->h2pack->timers; h2pack_timers[MV_VOP_TIMER_IDX] += build_K_timers[BUILD_K_AUX_TIMER_IDX]; h2pack_timers[MV_FWD_TIMER_IDX] += build_K_timers[BUILD_K_MM_FWD_TIMER_IDX]; h2pack_timers[MV_MID_TIMER_IDX] += build_K_timers[BUILD_K_MM_MID_TIMER_IDX]; h2pack_timers[MV_BWD_TIMER_IDX] += build_K_timers[BUILD_K_MM_BWD_TIMER_IDX]; h2pack_timers[MV_DEN_TIMER_IDX] += build_K_timers[BUILD_K_MM_DEN_TIMER_IDX]; h2eri->h2pack->n_matvec++; }
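/*
 * Editor's sketch (not part of H2ERI): the low-rank branch of
 * H2ERI_BD_blk_matmul above relies on reassociating the product,
 * mat_out = (U * V) * mat_in = U * (V * mat_in), so that a rank-r block of
 * size m x n costs O((m + n) * r * nvec) flops instead of the
 * O(m * n * nvec) a dense multiply would need. The minimal standalone
 * version below uses plain triple loops in place of CBLAS_GEMM; the
 * function names gemm_acc and lowrank_matmul are hypothetical.
 */
#include <stddef.h>

/* C (m x k, row-major) += A (m x n) * B (n x k) */
static void gemm_acc(int m, int n, int k, const double *A, const double *B, double *C)
{
    for (int i = 0; i < m; i++)
        for (int l = 0; l < n; l++)
        {
            double a = A[(size_t) i * n + l];
            for (int j = 0; j < k; j++)
                C[(size_t) i * k + j] += a * B[(size_t) l * k + j];
        }
}

/* y (m x k) += U (m x r) * (V (r x n) * x (n x k)); tmp is r x k scratch */
static void lowrank_matmul(
    int m, int n, int r, int k,
    const double *U, const double *V,
    const double *x, double *y, double *tmp
)
{
    for (size_t i = 0; i < (size_t) r * k; i++) tmp[i] = 0.0;
    gemm_acc(r, n, k, V, x, tmp);   /* tmp = V * x   : r*n*k flops */
    gemm_acc(m, r, k, U, tmp, y);   /* y  += U * tmp : m*r*k flops */
}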
defaultshared-clause.c
/* gcc -fopenmp -O2 shared-clause.c -o shared-clause */
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif

int main(void)
{
    int i, n = 7;
    int a[n];

    for (i = 0; i < n; i++)
        a[i] = i + 1;

    // Private -> i
    // Shared  -> a, n
    // If we declared the vector (or a matrix) as private:
    // 1- we would be reserving a local copy of it in each thread
    // 2- only the pointer would be copied, not its data, so we would read garbage
#pragma omp parallel for shared(a,n) private(i) default(none)
    for (i = 0; i < n; i++)
        a[i] += i;

    printf("After parallel for:\n");
    for (i = 0; i < n; i++)
        printf("a[%d] = %d\n", i, a[i]);

    return 0;
}
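/*
 * Editor's sketch (hypothetical companion program, not from the original
 * file): the comment above notes that declaring the array private would
 * leave each thread with an uninitialized local copy. firstprivate is the
 * clause that also copies the elements in, which the variant below
 * demonstrates; with private(a) instead, the reads of a[i] would be
 * undefined.
 */
#include <stdio.h>

int main(void)
{
    enum { N = 4 };
    int a[N] = {1, 2, 3, 4};
    int sum = 0;

    /* firstprivate(a): each thread starts from an initialized copy of a,
       so reading a[i] inside the region is well defined. */
#pragma omp parallel for firstprivate(a) reduction(+:sum) default(none)
    for (int i = 0; i < N; i++)
        sum += a[i];

    printf("sum = %d\n", sum);   /* prints sum = 10 */
    return 0;
}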
GB_unaryop__minv_fp64_fp32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_fp64_fp32
// op(A') function:  GB_tran__minv_fp64_fp32

// C type:   double
// A type:   float
// cast:     double cij = (double) aij
// unaryop:  cij = 1./aij

#define GB_ATYPE \
    float

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = 1./x ;

// casting
#define GB_CASTING(z, aij) \
    double z = (double) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_FP64 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__minv_fp64_fp32
(
    double *Cx,         // Cx and Ax may be aliased
    float *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__minv_fp64_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
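/*
 * Editor's sketch (illustrative only, not the library's actual expansion):
 * with the GB_* macros above resolved by hand, GB_unop__minv_fp64_fp32
 * reduces to a flat, embarrassingly parallel apply loop. The standalone
 * function name minv_fp64_fp32 is hypothetical, and the GrB_Info /
 * GB_DISABLE machinery is dropped.
 */
#include <stdint.h>

void minv_fp64_fp32 (double *Cx, const float *Ax, int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        double z = (double) Ax [p] ;   /* GB_GETA + GB_CASTING: float -> double */
        Cx [p] = 1./z ;                /* GB_OP: multiplicative inverse */
    }
}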
modularity_spectral.c
#include <time.h> #include "graph_partitioning.h" #include "utils.h" typedef struct { double *dist; /* stores the value */ int *vertex; /* map from the index of dist to vertex */ int *map; /* map from vertex to index of dist */ int *from; /* map from vertex to its parents in djkstra */ int size; } heap; void computeEigen(graph_t *G, double *eigenVectorOld, double *eigenVectorNew, attr_id_t *v2C, attr_id_t *v2pos, attr_id_t* degree, attr_id_t *vertex, attr_id_t *toSplit, attr_id_t currCommunity, attr_id_t communitySize, attr_id_t degreeSum); void computeModularityValue(graph_t *G, attr_id_t *membership, attr_id_t numCommunities, double *modularity) { attr_id_t i,j; attr_id_t n, m; attr_id_t comm; double mod=0.0; double degree_u,degree_v; n = G->n; m = G->m; for(i=0; i<n; i++) { comm = membership[i]; degree_u = (double)G->numEdges[i+1] - G->numEdges[i]; for(j=G->numEdges[i]; j<G->numEdges[i+1]; j++) { if(membership[G->endV[j]] == comm) mod +=1.0; } for(j=0; j<G->n; j++) { degree_v = (double)G->numEdges[j+1] - G->numEdges[j]; if(comm == membership[j]) mod -= (double)(degree_u*degree_v)/(double)(2.0*G->m); } } *modularity = mod/(2*G->m); } void modularity_spectral(graph_t *G, attr_id_t *membership, attr_id_t *numCommunities, attr_id_t use_improvement) { attr_id_t *v2C, *degree, *vertex, *v2pos; attr_id_t u,v,curCommunity=0, newCommunity,toSplit; attr_id_t sumV1,sumV2,comm,count1,count2; attr_id_t i,j,communitySize,degreeSum; list_t *Q; node_t *first; double *eigenVectorOld,*eigenVectorNew; attr_id_t continue_flag = 0; double *contribution, max_contrib, modularity, new_modularity; attr_id_t degree_u, degree_v, degreeSum1, degreeSum2, maxv, flag, flag_counter; attr_id_t n, m; n = G->n; m = G->m; *numCommunities = 1; v2C = membership; /* v2C is a map from vertex to the community it belongs. */ vertex = (attr_id_t *) malloc(sizeof(attr_id_t)*n); /* vertex is an array of vertices belonging to a particular community.*/ v2pos = (attr_id_t *) malloc(sizeof(attr_id_t)*n); /* v2pos is a map from vertex to its respective position in the community.*/ degree = (attr_id_t *) malloc(sizeof(attr_id_t)*n); /* degree is a map from vertex to its degree in its respective community.*/ eigenVectorOld=(double*)malloc(sizeof(double)*n); eigenVectorNew=(double*)malloc(sizeof(double)*n); contribution = (double*)malloc(sizeof(double)*n); assert(v2C != NULL); assert(vertex != NULL); assert(degree != NULL); assert(eigenVectorOld!=NULL); assert(eigenVectorNew!=NULL); assert(contribution != NULL); for(i=0; i< G->n; i++) { v2C[i] = 0; vertex[i] = i; v2pos[i] = i; degree[i] = G->numEdges[i+1] - G->numEdges[i]; } /* Making a queue. This queue will store all the communities that are yet to be processed. */ Q=(list_t*)makeList(); append(Q, makeNode(curCommunity)); /* printList(Q); */ while(Q->size > 0) { first = (node_t*) getFirst(Q); curCommunity = first->id; deleteFirst(Q); continue_flag = 0; #ifdef _OPENMP OMP("omp parallel for") #endif for(i=0; i<n; i++) { vertex[i] = -1; } degreeSum=0; communitySize=-1; /* Checking which all vertices belong to this community and updating the vertex Vector accordingly. */ for(i=0; i<G->n; i++) { contribution[i] = 0.0; /* added later for klin */ if(v2C[i] == curCommunity) { { communitySize++; vertex[communitySize] = i; } v2pos[i] = communitySize; degreeSum += G->numEdges[i+1]-G->numEdges[i]; } } communitySize ++; if(communitySize == 1) continue; /* Calculating modularity by the current Community. 
*/ modularity = 0.0; #ifdef _OPENMP OMP("omp parallel for private(j) reduction(+:modularity)") #endif for(i=0; i<n;i++) { if(v2C[i] == curCommunity) { for(j=G->numEdges[i] ; j<G->numEdges[i+1]; j++) { if(v2C[G->endV[j]] == curCommunity) modularity += 1.0; } modularity += -((G->numEdges[i+1] - G->numEdges[i])*degreeSum)/(2.0*G->m); } } modularity /=(2.0*G->m); /* Computing eigen vector. */ computeEigen(G,eigenVectorOld, eigenVectorNew, v2C, v2pos, degree, vertex, &toSplit, curCommunity, communitySize, degreeSum); if(toSplit == 0) continue; newCommunity = *numCommunities; count1=count2=sumV1=sumV2=0; degreeSum1 = degreeSum2 = 0; new_modularity=0.0; #ifdef _OPENMP OMP("omp parallel for reduction(+:count1,count2)") #endif for(i=0; i<communitySize; i++) { if(eigenVectorOld[i] > 0) count1++; else count2++; } if(count1 == 0 || count2 == 0) continue; /* All eigen values are of same size and hence no division is required. */ /* Now, we actually divide the community to new communities. */ #ifdef _OPENMP OMP("omp parallel if (communitySize>100) ") #endif { #ifdef _OPENMP OMP("omp for reduction(+:sumV1,sumV2)") #endif for(i=0; i<communitySize ; i++) { if(eigenVectorOld[i] > 0) { v2C[vertex[i]] = newCommunity; sumV1++; } else sumV2++; } /* Calculating new degree sums. */ #ifdef _OPENMP OMP("omp for reduction(+:degreeSum1, degreeSum2) private(comm)") #endif for(i=0; i<communitySize; i++) { comm = v2C[vertex[i]]; if (comm == curCommunity) degreeSum1 += G->numEdges[vertex[i]+1] - G->numEdges[vertex[i]]; else degreeSum2 += G->numEdges[vertex[i]+1] - G->numEdges[vertex[i]]; } /* Calculating new modularity value. */ #ifdef _OPENMP OMP("omp for private(u,v,degree_u, degree_v, comm) reduction(+:new_modularity)") #endif for(i=0; i<communitySize; i++) { u = vertex[i]; comm = v2C[u]; degree_u = G->numEdges[u+1] - G->numEdges[u]; for(j=G->numEdges[u]; j< G->numEdges[u+1]; j++) { v = G->endV[j]; degree_v = G->numEdges[v+1] - G->numEdges[v]; if ((v2C[v] == curCommunity && comm == curCommunity) || (v2C[v] == newCommunity && comm == newCommunity)) contribution[u] -=1.0; else if((v2C[v] == newCommunity && comm == curCommunity) || (v2C[v] == curCommunity && comm == newCommunity)) contribution[u] += 1.0; if(comm == v2C[v]) new_modularity +=1.0; } if(comm == curCommunity) { contribution[u] += (degree_u * (degreeSum1 ))/(2.0*G->m); contribution[u] -= (degree_u * (degreeSum2+degree_u ))/(2.0*G->m); new_modularity += -(double)(degree_u * degreeSum1)/(double)(2.0 * G->m); } else { contribution[u] += (degree_u * (degreeSum2 ))/(2.0*G->m); contribution[u] -= (degree_u * (degreeSum1 + degree_u ))/(2.0*G->m); new_modularity += -(double)(degree_u * degreeSum2)/(double)(2.0 * G->m); } } } new_modularity /= (2.0*G->m); if(new_modularity < modularity) { for(i=0; i<communitySize; i++) v2C[vertex[i]] = curCommunity; continue; } /* Now updating the degree Vectors */ #ifdef _OPENMP OMP("omp parallel for private(j,comm) if (communitySize>100)") #endif for(i=0; i<communitySize ; i++) { comm = v2C[vertex[i]]; degree[vertex[i]] = 0; for(j=G->numEdges[vertex[i]]; j<G->numEdges[vertex[i]+1]; j++) { if(v2C[G->endV[j]] == comm) degree[vertex[i]]++; } } /* KL - improvement */ if(use_improvement == 1) { max_contrib = -999999; maxv = -1; flag = 0; flag_counter = 0; while(flag == 0) { flag_counter ++; for(i=0; i<communitySize; i++) { if (contribution[vertex[i]]>0 && contribution[vertex[i]] > max_contrib) { max_contrib = contribution[vertex[i]]; maxv = vertex[i]; } } if(maxv == -1) flag = 1; else { /* swap communities. 
*/ if(v2C[maxv] == curCommunity) v2C[maxv] = newCommunity; else v2C[maxv] = curCommunity; /* now update the neighbours */ for(j=G->numEdges[maxv]; j<G->numEdges[maxv+1]; j++) { if(v2C[G->endV[j]] == v2C[maxv]) contribution[G->endV[j]] -= 1.0; /* now they are in same community */ else if (v2C[G->endV[j]] == newCommunity || v2C[G->endV[j]] == curCommunity) contribution[G->endV[j]] += 1.0; /* not they are in different community but earlier same. */ } degree_u = G->numEdges[maxv+1] - G->numEdges[maxv]; #ifdef _OPENMP OMP("omp parallel for private(v, degree_v) if (communitySize>100)") #endif for(j=0; j<communitySize; j++) { v = vertex[j]; degree_v = G->numEdges[v+1] - G->numEdges[v]; if(v2C[v] == v2C[maxv]) /*same community now */ contribution[v] += (degree_v*degree_u)/(2.0*G->m); else /* different community earlier same. */ contribution[v] -= (degree_v*degree_u)/(2.0*G->m); } } contribution[maxv] = max_contrib = -99999; maxv = -1; } } *numCommunities = *numCommunities + 1; append(Q,makeNode(curCommunity)); append(Q,makeNode(newCommunity)); } } void computeEigen(graph_t *G, double *eigenVectorOld, double *eigenVectorNew, attr_id_t *v2C, attr_id_t *v2pos, attr_id_t* degree, attr_id_t *vertex, attr_id_t *toSplit, attr_id_t currCommunity, attr_id_t communitySize, attr_id_t degreeSum) { attr_id_t i,j; attr_id_t iterCount,count, niter; double eigenValue,degree_u; double normalizedSum,ktx,mneg; attr_id_t numThreads; niter = (communitySize > 100) ? communitySize:100; mneg = 0.0; niter = 10*log(communitySize); count = 0; while(1) { iterCount = 0; count++; normalizedSum=0.0; srand(time(NULL)); #ifdef _OPENMP OMP("omp parallel if(communitySize>100) ") #endif { #ifdef _OPENMP OMP("omp for reduction(+:normalizedSum)") #endif for(i=0; i<communitySize; i++) { eigenVectorOld[i] = 2.0 *((double)rand()/(double)RAND_MAX) -1.0; normalizedSum += eigenVectorOld[i] * eigenVectorOld[i]; } #ifdef _OPENMP OMP("omp single") #endif { normalizedSum = sqrt(normalizedSum); } #ifdef _OPENMP OMP("omp for") #endif for(i=0; i<communitySize; i++) eigenVectorOld[i] = eigenVectorOld[i]/normalizedSum; } while(iterCount <niter) { { iterCount++; ktx=0.0; #ifdef _OPENMP numThreads = omp_get_num_threads(); #else numThreads = 1; #endif } #ifdef _OPENMP OMP("omp parallel if (communitySize>100)") #endif { #ifdef _OPENMP OMP("omp for private(j,degree_u) reduction(+:ktx)") #endif for(i=0; i<communitySize; i++) { eigenVectorNew[i]=0.0; degree_u = G->numEdges[vertex[i]+1] - G->numEdges[vertex[i]]; for(j=G->numEdges[vertex[i]]; j<G->numEdges[vertex[i]+1]; j++) { if(v2C[G->endV[j]] == currCommunity) { eigenVectorNew[i] += G->dbl_weight_e[j] * eigenVectorOld[v2pos[G->endV[j]]]; } } eigenVectorNew[i] -= (((double)degree[vertex[i]]) - (double)(degree_u * degreeSum)/(double)(2.0*G->m))* eigenVectorOld[i]; ktx += (double) degree_u * eigenVectorOld[i]; } #ifdef _OPENMP OMP("omp single") #endif { ktx /=(double)(2.0 * G->m); normalizedSum = 0.0; } #ifdef _OPENMP OMP("omp for reduction(+:normalizedSum)") #endif for(i=0; i<communitySize; i++) { eigenVectorNew[i] -= (double)(G->numEdges[vertex[i]+1]-G->numEdges[vertex[i]]) *ktx; eigenVectorNew[i] -= mneg*eigenVectorOld[i]; normalizedSum += eigenVectorNew[i]*eigenVectorNew[i]; } #ifdef _OPENMP OMP("omp single") #endif { normalizedSum = sqrt(normalizedSum); } #ifdef _OPENMP OMP("omp for") #endif for(i=0; i<communitySize;i++) { eigenVectorOld[i] = eigenVectorNew[i]/normalizedSum; } } } eigenValue =0.0; ktx=0.0; #ifdef _OPENMP OMP("omp parallel if (communitySize>100)") #endif { #ifdef _OPENMP OMP("omp for 
reduction(+:ktx,eigenValue) private(j,degree_u)") #endif for(i=0; i<communitySize; i++) { degree_u = G->numEdges[vertex[i]+1] - G->numEdges[vertex[i]]; for(j=G->numEdges[vertex[i]]; j<G->numEdges[vertex[i]+1]; j++) { if(v2C[G->endV[j]] == currCommunity) { eigenValue += G->dbl_weight_e[j] * eigenVectorOld[v2pos[G->endV[j]]] * eigenVectorOld[i]; } } eigenValue += -((double)degree[vertex[i]] - (double)(((double) degree_u *degreeSum)/(double)(2.0*G->m)))* eigenVectorOld[i] * eigenVectorOld[i]; ktx += (double)degree_u * eigenVectorOld[i]; } #ifdef _OPENMP OMP("omp single") #endif { ktx /=(double)(2.0 * G->m); } #ifdef _OPENMP OMP("omp for reduction(-:eigenValue)") #endif for(i=0; i<communitySize; i++) eigenValue -= ((double)(G->numEdges[vertex[i]+1]- G->numEdges[vertex[i]])) *ktx *eigenVectorOld[i]; } /* printf("The eigenValue is %f\n",eigenValue); */ { if(eigenValue <0.0000001) { if(count==2) { *toSplit = 0; break; } mneg=eigenValue; } else { *toSplit = 1; break; } } } } /* Simplistic mod without klin */ void modularity_spectral_wo_klin(graph_t *G, attr_id_t *membership, attr_id_t *numCommunities) { attr_id_t *v2C, *degree, *vertex, *v2pos; attr_id_t curCommunity=0, newCommunity,toSplit; attr_id_t n=G->n,sumV1,sumV2,comm,count1,count2; attr_id_t i,j,communitySize,degreeSum; list_t *Q; node_t *first; double *eigenVectorOld,*eigenVectorNew; attr_id_t continue_flag = 0; *numCommunities = 1; v2C = membership; /* v2C is a map from vertex to the community it belongs. */ vertex = (attr_id_t *) malloc(sizeof(attr_id_t)*n); /* vertex is an array of vertices belonging to a particular community. */ v2pos = (attr_id_t *) malloc(sizeof(attr_id_t)*n); /* v2pos is a map from vertex to its respective position in the community.*/ degree = (attr_id_t *) malloc(sizeof(attr_id_t)*n); /* degree is a map from vertex to its degree in its respective community.*/ eigenVectorOld=(double*)malloc(sizeof(double)*n); eigenVectorNew=(double*)malloc(sizeof(double)*n); assert(eigenVectorOld!=NULL);assert(eigenVectorNew!=NULL); for(i=0; i< G->n; i++) { v2C[i] = 0; vertex[i] = i; v2pos[i] = i; degree[i] = G->numEdges[i+1] - G->numEdges[i]; } /* Making a queue. This queue will store all the communities that are yet to be processed. */ Q=(list_t*)makeList(); append(Q, makeNode(curCommunity)); while(Q->size > 0) { first = (node_t*) getFirst(Q); curCommunity = first->id; deleteFirst(Q); continue_flag = 0; for(i=0; i<G->n; i++) { vertex[i] = -1; } /* printf("\n\nEvaluating Community:%d\n",curCommunity); */ degreeSum=0; communitySize=-1; /* Checking which all vertices belong to this community and updating the vertex Vector accordingly. */ /* #pragma omp parallel for shared(communitySize) reduction(+:degreeSum) */ for(i=0; i<G->n; i++) { if(v2C[i] == curCommunity) { { communitySize++; vertex[communitySize] = i; } v2pos[i] = communitySize; degreeSum += G->numEdges[i+1]-G->numEdges[i]; } } communitySize ++; /* printf("community Size =%d, degree Sum =%d\n" ,communitySize, degreeSum); */ if(communitySize == 1) continue; computeEigen(G,eigenVectorOld, eigenVectorNew, v2C,v2pos,degree,vertex,&toSplit,curCommunity,communitySize,degreeSum); if(toSplit == 0) continue; newCommunity = *numCommunities; count1=count2=sumV1=sumV2=0; #ifdef _OPENMP OMP("omp parallel for reduction(+:count1,count2)") #endif for(i=0; i<communitySize; i++) { if(eigenVectorOld[i] > 0) count1++; else count2++; } if(count1 == 0 || count2 == 0) { continue; /*All eigen values are of same size and hence no division is required. 
*/ } #ifdef _OPENMP OMP("omp parallel if (communitySize>100)") #endif { #ifdef _OPENMP OMP("omp for reduction(+:sumV1,sumV2)") #endif for(i=0; i<communitySize ; i++) { if(eigenVectorOld[i] > 0) { v2C[vertex[i]] = newCommunity; sumV1++; } else sumV2++; } /* Now updating the degree Vectors */ #ifdef _OPENMP OMP("omp for private(j,comm)") #endif for(i=0; i<communitySize ; i++) { comm = v2C[vertex[i]]; degree[vertex[i]] = 0; for(j=G->numEdges[vertex[i]]; j<G->numEdges[vertex[i]+1]; j++) { if(v2C[G->endV[j]] == comm) degree[vertex[i]]++; } } } *numCommunities = *numCommunities + 1; append(Q,makeNode(curCommunity)); append(Q,makeNode(newCommunity)); } }
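/*
 * Editor's sketch (not from the SNAP source above): computeEigen is, at its
 * core, a power iteration for the leading eigenpair of the modularity
 * matrix B = A - k*k^T/(2m), applied implicitly and restricted to one
 * community. The dense standalone version below takes an explicit n x n
 * row-major matrix B and a fixed iteration count, and omits the
 * shift/restart logic used above for negative leading eigenvalues;
 * power_iteration is a hypothetical name.
 */
#include <math.h>
#include <stdlib.h>

/* On exit eigvec (length n) holds a unit-norm estimate of the leading
   eigenvector; the return value is its Rayleigh quotient v^T B v. */
static double power_iteration(const double *B, int n, int niter, double *eigvec)
{
    double *next = (double *) malloc(sizeof(double) * n);
    double norm = 0.0, lambda = 0.0;
    if (next == NULL) return 0.0;

    /* Random unit-norm starting vector, as in computeEigen above */
    for (int i = 0; i < n; i++)
    {
        eigvec[i] = 2.0 * ((double) rand() / (double) RAND_MAX) - 1.0;
        norm += eigvec[i] * eigvec[i];
    }
    norm = sqrt(norm);
    for (int i = 0; i < n; i++) eigvec[i] /= norm;

    for (int it = 0; it < niter; it++)
    {
        norm = 0.0;
        for (int i = 0; i < n; i++)
        {
            next[i] = 0.0;
            for (int j = 0; j < n; j++)
                next[i] += B[i * n + j] * eigvec[j];
            norm += next[i] * next[i];
        }
        norm = sqrt(norm);
        for (int i = 0; i < n; i++) eigvec[i] = next[i] / norm;
    }

    /* Rayleigh quotient of the (already unit-norm) iterate */
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++)
            lambda += eigvec[i] * B[i * n + j] * eigvec[j];

    free(next);
    return lambda;
}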
convolution_7x7_pack1to8_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv7x7s2_pack1to8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2 * outw + w; const __fp16* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out0 = top_blob.channel(p); float16x8_t _bias0 = bias ? vld1q_f16(bias + p * 8) : vdupq_n_f16(0.f); out0.fill(_bias0); for (int q = 0; q < inch; q++) { __fp16* outptr0 = out0; const Mat img0 = bottom_blob.channel(q); const __fp16* r0 = img0.row<const __fp16>(0); const __fp16* r1 = img0.row<const __fp16>(1); const __fp16* r2 = img0.row<const __fp16>(2); const __fp16* r3 = img0.row<const __fp16>(3); const __fp16* r4 = img0.row<const __fp16>(4); const __fp16* r5 = img0.row<const __fp16>(5); const __fp16* r6 = img0.row<const __fp16>(6); const __fp16* kptr = kernel.channel(p).row<const __fp16>(q); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 7 < outw; j += 8) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%0], #64 \n" // sum0 "prfm pldl1keep, [%1, #384] \n" "ld1 {v0.8h, v1.8h, v2.8h}, [%1] \n" // r0 "prfm pldl1keep, [%8, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0] \n" // sum0 "fmla v24.8h, v16.8h, v0.h[0] \n" "fmla v25.8h, v16.8h, v0.h[2] \n" "fmla v26.8h, v16.8h, v0.h[4] \n" "fmla v27.8h, v16.8h, v0.h[6] \n" "fmla v28.8h, v16.8h, v1.h[0] \n" "fmla v29.8h, v16.8h, v1.h[2] \n" "fmla v30.8h, v16.8h, v1.h[4] \n" "fmla v31.8h, v16.8h, v1.h[6] \n" "sub %0, %0, #64 \n" "fmla v24.8h, v17.8h, v0.h[1] \n" "fmla v25.8h, v17.8h, v0.h[3] \n" "fmla v26.8h, v17.8h, v0.h[5] \n" "fmla v27.8h, v17.8h, v0.h[7] \n" "fmla v28.8h, v17.8h, v1.h[1] \n" "fmla v29.8h, v17.8h, v1.h[3] \n" "fmla v30.8h, v17.8h, v1.h[5] \n" "fmla v31.8h, v17.8h, v1.h[7] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n" "fmla v24.8h, v18.8h, v0.h[2] \n" "fmla v25.8h, v18.8h, v0.h[4] \n" "fmla v26.8h, v18.8h, v0.h[6] \n" "fmla v27.8h, v18.8h, v1.h[0] \n" "fmla v28.8h, v18.8h, v1.h[2] \n" "fmla v29.8h, v18.8h, v1.h[4] \n" "fmla v30.8h, v18.8h, v1.h[6] \n" "fmla v31.8h, v18.8h, v2.h[0] \n" "fmla v24.8h, v19.8h, v0.h[3] \n" "fmla v25.8h, v19.8h, v0.h[5] \n" "fmla v26.8h, v19.8h, v0.h[7] \n" "fmla v27.8h, v19.8h, v1.h[1] \n" "fmla v28.8h, v19.8h, v1.h[3] \n" "fmla v29.8h, v19.8h, v1.h[5] \n" "fmla v30.8h, v19.8h, v1.h[7] \n" "fmla v31.8h, v19.8h, v2.h[1] \n" "prfm pldl1keep, [%2, #384] \n" "ld1 {v4.8h, v5.8h, v6.8h}, [%2] \n" // r1 "fmla v24.8h, v20.8h, v0.h[4] \n" "fmla v25.8h, v20.8h, v0.h[6] \n" "fmla v26.8h, v20.8h, v1.h[0] \n" "fmla v27.8h, v20.8h, 
v1.h[2] \n" "fmla v28.8h, v20.8h, v1.h[4] \n" "fmla v29.8h, v20.8h, v1.h[6] \n" "fmla v30.8h, v20.8h, v2.h[0] \n" "fmla v31.8h, v20.8h, v2.h[2] \n" "fmla v24.8h, v21.8h, v0.h[5] \n" "fmla v25.8h, v21.8h, v0.h[7] \n" "fmla v26.8h, v21.8h, v1.h[1] \n" "fmla v27.8h, v21.8h, v1.h[3] \n" "fmla v28.8h, v21.8h, v1.h[5] \n" "fmla v29.8h, v21.8h, v1.h[7] \n" "fmla v30.8h, v21.8h, v2.h[1] \n" "fmla v31.8h, v21.8h, v2.h[3] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n" "fmla v24.8h, v22.8h, v0.h[6] \n" "fmla v25.8h, v22.8h, v1.h[0] \n" "fmla v26.8h, v22.8h, v1.h[2] \n" "fmla v27.8h, v22.8h, v1.h[4] \n" "fmla v28.8h, v22.8h, v1.h[6] \n" "fmla v29.8h, v22.8h, v2.h[0] \n" "fmla v30.8h, v22.8h, v2.h[2] \n" "fmla v31.8h, v22.8h, v2.h[4] \n" "fmla v24.8h, v23.8h, v4.h[0] \n" "fmla v25.8h, v23.8h, v4.h[2] \n" "fmla v26.8h, v23.8h, v4.h[4] \n" "fmla v27.8h, v23.8h, v4.h[6] \n" "fmla v28.8h, v23.8h, v5.h[0] \n" "fmla v29.8h, v23.8h, v5.h[2] \n" "fmla v30.8h, v23.8h, v5.h[4] \n" "fmla v31.8h, v23.8h, v5.h[6] \n" "fmla v24.8h, v16.8h, v4.h[1] \n" "fmla v25.8h, v16.8h, v4.h[3] \n" "fmla v26.8h, v16.8h, v4.h[5] \n" "fmla v27.8h, v16.8h, v4.h[7] \n" "fmla v28.8h, v16.8h, v5.h[1] \n" "fmla v29.8h, v16.8h, v5.h[3] \n" "fmla v30.8h, v16.8h, v5.h[5] \n" "fmla v31.8h, v16.8h, v5.h[7] \n" "fmla v24.8h, v17.8h, v4.h[2] \n" "fmla v25.8h, v17.8h, v4.h[4] \n" "fmla v26.8h, v17.8h, v4.h[6] \n" "fmla v27.8h, v17.8h, v5.h[0] \n" "fmla v28.8h, v17.8h, v5.h[2] \n" "fmla v29.8h, v17.8h, v5.h[4] \n" "fmla v30.8h, v17.8h, v5.h[6] \n" "fmla v31.8h, v17.8h, v6.h[0] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n" "fmla v24.8h, v18.8h, v4.h[3] \n" "fmla v25.8h, v18.8h, v4.h[5] \n" "fmla v26.8h, v18.8h, v4.h[7] \n" "fmla v27.8h, v18.8h, v5.h[1] \n" "fmla v28.8h, v18.8h, v5.h[3] \n" "fmla v29.8h, v18.8h, v5.h[5] \n" "fmla v30.8h, v18.8h, v5.h[7] \n" "fmla v31.8h, v18.8h, v6.h[1] \n" "fmla v24.8h, v19.8h, v4.h[4] \n" "fmla v25.8h, v19.8h, v4.h[6] \n" "fmla v26.8h, v19.8h, v5.h[0] \n" "fmla v27.8h, v19.8h, v5.h[2] \n" "fmla v28.8h, v19.8h, v5.h[4] \n" "fmla v29.8h, v19.8h, v5.h[6] \n" "fmla v30.8h, v19.8h, v6.h[0] \n" "fmla v31.8h, v19.8h, v6.h[2] \n" "prfm pldl1keep, [%3, #384] \n" "ld1 {v0.8h, v1.8h, v2.8h}, [%3] \n" // r2 "fmla v24.8h, v20.8h, v4.h[5] \n" "fmla v25.8h, v20.8h, v4.h[7] \n" "fmla v26.8h, v20.8h, v5.h[1] \n" "fmla v27.8h, v20.8h, v5.h[3] \n" "fmla v28.8h, v20.8h, v5.h[5] \n" "fmla v29.8h, v20.8h, v5.h[7] \n" "fmla v30.8h, v20.8h, v6.h[1] \n" "fmla v31.8h, v20.8h, v6.h[3] \n" "fmla v24.8h, v21.8h, v4.h[6] \n" "fmla v25.8h, v21.8h, v5.h[0] \n" "fmla v26.8h, v21.8h, v5.h[2] \n" "fmla v27.8h, v21.8h, v5.h[4] \n" "fmla v28.8h, v21.8h, v5.h[6] \n" "fmla v29.8h, v21.8h, v6.h[0] \n" "fmla v30.8h, v21.8h, v6.h[2] \n" "fmla v31.8h, v21.8h, v6.h[4] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n" "fmla v24.8h, v22.8h, v0.h[0] \n" "fmla v25.8h, v22.8h, v0.h[2] \n" "fmla v26.8h, v22.8h, v0.h[4] \n" "fmla v27.8h, v22.8h, v0.h[6] \n" "fmla v28.8h, v22.8h, v1.h[0] \n" "fmla v29.8h, v22.8h, v1.h[2] \n" "fmla v30.8h, v22.8h, v1.h[4] \n" "fmla v31.8h, v22.8h, v1.h[6] \n" "fmla v24.8h, v23.8h, v0.h[1] \n" "fmla v25.8h, v23.8h, v0.h[3] \n" "fmla v26.8h, v23.8h, v0.h[5] \n" "fmla v27.8h, v23.8h, v0.h[7] \n" "fmla v28.8h, v23.8h, v1.h[1] \n" "fmla v29.8h, v23.8h, v1.h[3] \n" "fmla v30.8h, v23.8h, v1.h[5] \n" "fmla v31.8h, v23.8h, v1.h[7] \n" "fmla v24.8h, v16.8h, v0.h[2] \n" "fmla v25.8h, v16.8h, v0.h[4] \n" "fmla v26.8h, 
v16.8h, v0.h[6] \n" "fmla v27.8h, v16.8h, v1.h[0] \n" "fmla v28.8h, v16.8h, v1.h[2] \n" "fmla v29.8h, v16.8h, v1.h[4] \n" "fmla v30.8h, v16.8h, v1.h[6] \n" "fmla v31.8h, v16.8h, v2.h[0] \n" "fmla v24.8h, v17.8h, v0.h[3] \n" "fmla v25.8h, v17.8h, v0.h[5] \n" "fmla v26.8h, v17.8h, v0.h[7] \n" "fmla v27.8h, v17.8h, v1.h[1] \n" "fmla v28.8h, v17.8h, v1.h[3] \n" "fmla v29.8h, v17.8h, v1.h[5] \n" "fmla v30.8h, v17.8h, v1.h[7] \n" "fmla v31.8h, v17.8h, v2.h[1] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n" "fmla v24.8h, v18.8h, v0.h[4] \n" "fmla v25.8h, v18.8h, v0.h[6] \n" "fmla v26.8h, v18.8h, v1.h[0] \n" "fmla v27.8h, v18.8h, v1.h[2] \n" "fmla v28.8h, v18.8h, v1.h[4] \n" "fmla v29.8h, v18.8h, v1.h[6] \n" "fmla v30.8h, v18.8h, v2.h[0] \n" "fmla v31.8h, v18.8h, v2.h[2] \n" "prfm pldl1keep, [%4, #384] \n" "ld1 {v4.8h, v5.8h, v6.8h}, [%4] \n" // r3 "fmla v24.8h, v19.8h, v0.h[5] \n" "fmla v25.8h, v19.8h, v0.h[7] \n" "fmla v26.8h, v19.8h, v1.h[1] \n" "fmla v27.8h, v19.8h, v1.h[3] \n" "fmla v28.8h, v19.8h, v1.h[5] \n" "fmla v29.8h, v19.8h, v1.h[7] \n" "fmla v30.8h, v19.8h, v2.h[1] \n" "fmla v31.8h, v19.8h, v2.h[3] \n" "fmla v24.8h, v20.8h, v0.h[6] \n" "fmla v25.8h, v20.8h, v1.h[0] \n" "fmla v26.8h, v20.8h, v1.h[2] \n" "fmla v27.8h, v20.8h, v1.h[4] \n" "fmla v28.8h, v20.8h, v1.h[6] \n" "fmla v29.8h, v20.8h, v2.h[0] \n" "fmla v30.8h, v20.8h, v2.h[2] \n" "fmla v31.8h, v20.8h, v2.h[4] \n" "fmla v24.8h, v21.8h, v4.h[0] \n" "fmla v25.8h, v21.8h, v4.h[2] \n" "fmla v26.8h, v21.8h, v4.h[4] \n" "fmla v27.8h, v21.8h, v4.h[6] \n" "fmla v28.8h, v21.8h, v5.h[0] \n" "fmla v29.8h, v21.8h, v5.h[2] \n" "fmla v30.8h, v21.8h, v5.h[4] \n" "fmla v31.8h, v21.8h, v5.h[6] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n" "fmla v24.8h, v22.8h, v4.h[1] \n" "fmla v25.8h, v22.8h, v4.h[3] \n" "fmla v26.8h, v22.8h, v4.h[5] \n" "fmla v27.8h, v22.8h, v4.h[7] \n" "fmla v28.8h, v22.8h, v5.h[1] \n" "fmla v29.8h, v22.8h, v5.h[3] \n" "fmla v30.8h, v22.8h, v5.h[5] \n" "fmla v31.8h, v22.8h, v5.h[7] \n" "fmla v24.8h, v23.8h, v4.h[2] \n" "fmla v25.8h, v23.8h, v4.h[4] \n" "fmla v26.8h, v23.8h, v4.h[6] \n" "fmla v27.8h, v23.8h, v5.h[0] \n" "fmla v28.8h, v23.8h, v5.h[2] \n" "fmla v29.8h, v23.8h, v5.h[4] \n" "fmla v30.8h, v23.8h, v5.h[6] \n" "fmla v31.8h, v23.8h, v6.h[0] \n" "fmla v24.8h, v16.8h, v4.h[3] \n" "fmla v25.8h, v16.8h, v4.h[5] \n" "fmla v26.8h, v16.8h, v4.h[7] \n" "fmla v27.8h, v16.8h, v5.h[1] \n" "fmla v28.8h, v16.8h, v5.h[3] \n" "fmla v29.8h, v16.8h, v5.h[5] \n" "fmla v30.8h, v16.8h, v5.h[7] \n" "fmla v31.8h, v16.8h, v6.h[1] \n" "fmla v24.8h, v17.8h, v4.h[4] \n" "fmla v25.8h, v17.8h, v4.h[6] \n" "fmla v26.8h, v17.8h, v5.h[0] \n" "fmla v27.8h, v17.8h, v5.h[2] \n" "fmla v28.8h, v17.8h, v5.h[4] \n" "fmla v29.8h, v17.8h, v5.h[6] \n" "fmla v30.8h, v17.8h, v6.h[0] \n" "fmla v31.8h, v17.8h, v6.h[2] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n" "fmla v24.8h, v18.8h, v4.h[5] \n" "fmla v25.8h, v18.8h, v4.h[7] \n" "fmla v26.8h, v18.8h, v5.h[1] \n" "fmla v27.8h, v18.8h, v5.h[3] \n" "fmla v28.8h, v18.8h, v5.h[5] \n" "fmla v29.8h, v18.8h, v5.h[7] \n" "fmla v30.8h, v18.8h, v6.h[1] \n" "fmla v31.8h, v18.8h, v6.h[3] \n" "prfm pldl1keep, [%5, #384] \n" "ld1 {v0.8h, v1.8h, v2.8h}, [%5] \n" // r4 "fmla v24.8h, v19.8h, v4.h[6] \n" "fmla v25.8h, v19.8h, v5.h[0] \n" "fmla v26.8h, v19.8h, v5.h[2] \n" "fmla v27.8h, v19.8h, v5.h[4] \n" "fmla v28.8h, v19.8h, v5.h[6] \n" "fmla v29.8h, v19.8h, v6.h[0] \n" "fmla v30.8h, v19.8h, v6.h[2] \n" 
"fmla v31.8h, v19.8h, v6.h[4] \n" "fmla v24.8h, v20.8h, v0.h[0] \n" "fmla v25.8h, v20.8h, v0.h[2] \n" "fmla v26.8h, v20.8h, v0.h[4] \n" "fmla v27.8h, v20.8h, v0.h[6] \n" "fmla v28.8h, v20.8h, v1.h[0] \n" "fmla v29.8h, v20.8h, v1.h[2] \n" "fmla v30.8h, v20.8h, v1.h[4] \n" "fmla v31.8h, v20.8h, v1.h[6] \n" "fmla v24.8h, v21.8h, v0.h[1] \n" "fmla v25.8h, v21.8h, v0.h[3] \n" "fmla v26.8h, v21.8h, v0.h[5] \n" "fmla v27.8h, v21.8h, v0.h[7] \n" "fmla v28.8h, v21.8h, v1.h[1] \n" "fmla v29.8h, v21.8h, v1.h[3] \n" "fmla v30.8h, v21.8h, v1.h[5] \n" "fmla v31.8h, v21.8h, v1.h[7] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n" "fmla v24.8h, v22.8h, v0.h[2] \n" "fmla v25.8h, v22.8h, v0.h[4] \n" "fmla v26.8h, v22.8h, v0.h[6] \n" "fmla v27.8h, v22.8h, v1.h[0] \n" "fmla v28.8h, v22.8h, v1.h[2] \n" "fmla v29.8h, v22.8h, v1.h[4] \n" "fmla v30.8h, v22.8h, v1.h[6] \n" "fmla v31.8h, v22.8h, v2.h[0] \n" "fmla v24.8h, v23.8h, v0.h[3] \n" "fmla v25.8h, v23.8h, v0.h[5] \n" "fmla v26.8h, v23.8h, v0.h[7] \n" "fmla v27.8h, v23.8h, v1.h[1] \n" "fmla v28.8h, v23.8h, v1.h[3] \n" "fmla v29.8h, v23.8h, v1.h[5] \n" "fmla v30.8h, v23.8h, v1.h[7] \n" "fmla v31.8h, v23.8h, v2.h[1] \n" "prfm pldl1keep, [%6, #384] \n" "ld1 {v4.8h, v5.8h, v6.8h}, [%6] \n" // r5 "fmla v24.8h, v16.8h, v0.h[4] \n" "fmla v25.8h, v16.8h, v0.h[6] \n" "fmla v26.8h, v16.8h, v1.h[0] \n" "fmla v27.8h, v16.8h, v1.h[2] \n" "fmla v28.8h, v16.8h, v1.h[4] \n" "fmla v29.8h, v16.8h, v1.h[6] \n" "fmla v30.8h, v16.8h, v2.h[0] \n" "fmla v31.8h, v16.8h, v2.h[2] \n" "fmla v24.8h, v17.8h, v0.h[5] \n" "fmla v25.8h, v17.8h, v0.h[7] \n" "fmla v26.8h, v17.8h, v1.h[1] \n" "fmla v27.8h, v17.8h, v1.h[3] \n" "fmla v28.8h, v17.8h, v1.h[5] \n" "fmla v29.8h, v17.8h, v1.h[7] \n" "fmla v30.8h, v17.8h, v2.h[1] \n" "fmla v31.8h, v17.8h, v2.h[3] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n" "fmla v24.8h, v18.8h, v0.h[6] \n" "fmla v25.8h, v18.8h, v1.h[0] \n" "fmla v26.8h, v18.8h, v1.h[2] \n" "fmla v27.8h, v18.8h, v1.h[4] \n" "fmla v28.8h, v18.8h, v1.h[6] \n" "fmla v30.8h, v18.8h, v2.h[2] \n" "fmla v29.8h, v18.8h, v2.h[0] \n" "fmla v31.8h, v18.8h, v2.h[4] \n" "fmla v24.8h, v19.8h, v4.h[0] \n" "fmla v25.8h, v19.8h, v4.h[2] \n" "fmla v26.8h, v19.8h, v4.h[4] \n" "fmla v27.8h, v19.8h, v4.h[6] \n" "fmla v28.8h, v19.8h, v5.h[0] \n" "fmla v29.8h, v19.8h, v5.h[2] \n" "fmla v30.8h, v19.8h, v5.h[4] \n" "fmla v31.8h, v19.8h, v5.h[6] \n" "fmla v24.8h, v20.8h, v4.h[1] \n" "fmla v25.8h, v20.8h, v4.h[3] \n" "fmla v26.8h, v20.8h, v4.h[5] \n" "fmla v27.8h, v20.8h, v4.h[7] \n" "fmla v28.8h, v20.8h, v5.h[1] \n" "fmla v29.8h, v20.8h, v5.h[3] \n" "fmla v30.8h, v20.8h, v5.h[5] \n" "fmla v31.8h, v20.8h, v5.h[7] \n" "fmla v24.8h, v21.8h, v4.h[2] \n" "fmla v25.8h, v21.8h, v4.h[4] \n" "fmla v26.8h, v21.8h, v4.h[6] \n" "fmla v27.8h, v21.8h, v5.h[0] \n" "fmla v28.8h, v21.8h, v5.h[2] \n" "fmla v29.8h, v21.8h, v5.h[4] \n" "fmla v30.8h, v21.8h, v5.h[6] \n" "fmla v31.8h, v21.8h, v6.h[0] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n" "fmla v24.8h, v22.8h, v4.h[3] \n" "fmla v25.8h, v22.8h, v4.h[5] \n" "fmla v26.8h, v22.8h, v4.h[7] \n" "fmla v27.8h, v22.8h, v5.h[1] \n" "fmla v28.8h, v22.8h, v5.h[3] \n" "fmla v29.8h, v22.8h, v5.h[5] \n" "fmla v30.8h, v22.8h, v5.h[7] \n" "fmla v31.8h, v22.8h, v6.h[1] \n" "fmla v24.8h, v23.8h, v4.h[4] \n" "fmla v25.8h, v23.8h, v4.h[6] \n" "fmla v26.8h, v23.8h, v5.h[0] \n" "fmla v27.8h, v23.8h, v5.h[2] \n" "fmla v28.8h, v23.8h, v5.h[4] \n" "fmla v29.8h, v23.8h, 
v5.h[6] \n" "fmla v30.8h, v23.8h, v6.h[0] \n" "fmla v31.8h, v23.8h, v6.h[2] \n" "prfm pldl1keep, [%7, #384] \n" "ld1 {v0.8h, v1.8h, v2.8h}, [%7] \n" // r6 "fmla v24.8h, v16.8h, v4.h[5] \n" "fmla v25.8h, v16.8h, v4.h[7] \n" "fmla v26.8h, v16.8h, v5.h[1] \n" "fmla v27.8h, v16.8h, v5.h[3] \n" "fmla v28.8h, v16.8h, v5.h[5] \n" "fmla v29.8h, v16.8h, v5.h[7] \n" "fmla v30.8h, v16.8h, v6.h[1] \n" "fmla v31.8h, v16.8h, v6.h[3] \n" "fmla v24.8h, v17.8h, v4.h[6] \n" "fmla v25.8h, v17.8h, v5.h[0] \n" "fmla v26.8h, v17.8h, v5.h[2] \n" "fmla v27.8h, v17.8h, v5.h[4] \n" "fmla v28.8h, v17.8h, v5.h[6] \n" "fmla v29.8h, v17.8h, v6.h[0] \n" "fmla v30.8h, v17.8h, v6.h[2] \n" "fmla v31.8h, v17.8h, v6.h[4] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n" "fmla v24.8h, v18.8h, v0.h[0] \n" "fmla v25.8h, v18.8h, v0.h[2] \n" "fmla v26.8h, v18.8h, v0.h[4] \n" "fmla v27.8h, v18.8h, v0.h[6] \n" "fmla v28.8h, v18.8h, v1.h[0] \n" "fmla v29.8h, v18.8h, v1.h[2] \n" "fmla v30.8h, v18.8h, v1.h[4] \n" "fmla v31.8h, v18.8h, v1.h[6] \n" "fmla v24.8h, v19.8h, v0.h[1] \n" "fmla v25.8h, v19.8h, v0.h[3] \n" "fmla v26.8h, v19.8h, v0.h[5] \n" "fmla v27.8h, v19.8h, v0.h[7] \n" "fmla v28.8h, v19.8h, v1.h[1] \n" "fmla v29.8h, v19.8h, v1.h[3] \n" "fmla v30.8h, v19.8h, v1.h[5] \n" "fmla v31.8h, v19.8h, v1.h[7] \n" "fmla v24.8h, v20.8h, v0.h[2] \n" "fmla v25.8h, v20.8h, v0.h[4] \n" "fmla v26.8h, v20.8h, v0.h[6] \n" "fmla v27.8h, v20.8h, v1.h[0] \n" "fmla v28.8h, v20.8h, v1.h[2] \n" "fmla v29.8h, v20.8h, v1.h[4] \n" "fmla v30.8h, v20.8h, v1.h[6] \n" "fmla v31.8h, v20.8h, v2.h[0] \n" "add %1, %1, #32 \n" "fmla v24.8h, v21.8h, v0.h[3] \n" "fmla v25.8h, v21.8h, v0.h[5] \n" "fmla v26.8h, v21.8h, v0.h[7] \n" "fmla v27.8h, v21.8h, v1.h[1] \n" "add %2, %2, #32 \n" "fmla v28.8h, v21.8h, v1.h[3] \n" "fmla v29.8h, v21.8h, v1.h[5] \n" "fmla v30.8h, v21.8h, v1.h[7] \n" "fmla v31.8h, v21.8h, v2.h[1] \n" "prfm pldl1keep, [%8, #128] \n" "ld1 {v16.8h}, [%8] \n" "fmla v24.8h, v22.8h, v0.h[4] \n" "fmla v25.8h, v22.8h, v0.h[6] \n" "fmla v26.8h, v22.8h, v1.h[0] \n" "fmla v27.8h, v22.8h, v1.h[2] \n" "add %3, %3, #32 \n" "fmla v28.8h, v22.8h, v1.h[4] \n" "fmla v29.8h, v22.8h, v1.h[6] \n" "fmla v30.8h, v22.8h, v2.h[0] \n" "fmla v31.8h, v22.8h, v2.h[2] \n" "add %4, %4, #32 \n" "fmla v24.8h, v23.8h, v0.h[5] \n" "fmla v25.8h, v23.8h, v0.h[7] \n" "fmla v26.8h, v23.8h, v1.h[1] \n" "fmla v27.8h, v23.8h, v1.h[3] \n" "add %5, %5, #32 \n" "fmla v28.8h, v23.8h, v1.h[5] \n" "fmla v29.8h, v23.8h, v1.h[7] \n" "fmla v30.8h, v23.8h, v2.h[1] \n" "fmla v31.8h, v23.8h, v2.h[3] \n" "add %6, %6, #32 \n" "fmla v24.8h, v16.8h, v0.h[6] \n" "fmla v25.8h, v16.8h, v1.h[0] \n" "fmla v26.8h, v16.8h, v1.h[2] \n" "fmla v27.8h, v16.8h, v1.h[4] \n" "add %7, %7, #32 \n" "fmla v28.8h, v16.8h, v1.h[6] \n" "fmla v29.8h, v16.8h, v2.h[0] \n" "fmla v30.8h, v16.8h, v2.h[2] \n" "fmla v31.8h, v16.8h, v2.h[4] \n" "sub %8, %8, #768 \n" // kptr -= 48 * 8; "st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%0], #64 \n" "st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4), // %5 "=r"(r5), // %6 "=r"(r6), // %7 "=r"(kptr) // %8 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "6"(r5), "7"(r6), "8"(kptr) : "memory", "v0", "v1", "v2", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } for (; j + 3 < outw; j += 4) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v28.8h, v29.8h, v30.8h, 
v31.8h}, [%0] \n" // sum0 "prfm pldl1keep, [%1, #256] \n" "ld1 {v0.8h, v1.8h}, [%1] \n" // r0 "prfm pldl1keep, [%8, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n" "fmla v28.8h, v16.8h, v0.h[0] \n" "fmla v29.8h, v16.8h, v0.h[2] \n" "fmla v30.8h, v16.8h, v0.h[4] \n" "fmla v31.8h, v16.8h, v0.h[6] \n" "fmla v28.8h, v17.8h, v0.h[1] \n" "fmla v29.8h, v17.8h, v0.h[3] \n" "fmla v30.8h, v17.8h, v0.h[5] \n" "fmla v31.8h, v17.8h, v0.h[7] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n" "fmla v28.8h, v18.8h, v0.h[2] \n" "fmla v29.8h, v18.8h, v0.h[4] \n" "fmla v30.8h, v18.8h, v0.h[6] \n" "fmla v31.8h, v18.8h, v1.h[0] \n" "fmla v28.8h, v19.8h, v0.h[3] \n" "fmla v29.8h, v19.8h, v0.h[5] \n" "fmla v30.8h, v19.8h, v0.h[7] \n" "fmla v31.8h, v19.8h, v1.h[1] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v2.8h, v3.8h}, [%2] \n" // r1 "fmla v28.8h, v20.8h, v0.h[4] \n" "fmla v29.8h, v20.8h, v0.h[6] \n" "fmla v30.8h, v20.8h, v1.h[0] \n" "fmla v31.8h, v20.8h, v1.h[2] \n" "fmla v28.8h, v21.8h, v0.h[5] \n" "fmla v29.8h, v21.8h, v0.h[7] \n" "fmla v30.8h, v21.8h, v1.h[1] \n" "fmla v31.8h, v21.8h, v1.h[3] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n" "fmla v28.8h, v22.8h, v0.h[6] \n" "fmla v29.8h, v22.8h, v1.h[0] \n" "fmla v30.8h, v22.8h, v1.h[2] \n" "fmla v31.8h, v22.8h, v1.h[4] \n" "fmla v28.8h, v23.8h, v2.h[0] \n" "fmla v29.8h, v23.8h, v2.h[2] \n" "fmla v30.8h, v23.8h, v2.h[4] \n" "fmla v31.8h, v23.8h, v2.h[6] \n" "fmla v28.8h, v16.8h, v2.h[1] \n" "fmla v29.8h, v16.8h, v2.h[3] \n" "fmla v30.8h, v16.8h, v2.h[5] \n" "fmla v31.8h, v16.8h, v2.h[7] \n" "fmla v28.8h, v17.8h, v2.h[2] \n" "fmla v29.8h, v17.8h, v2.h[4] \n" "fmla v30.8h, v17.8h, v2.h[6] \n" "fmla v31.8h, v17.8h, v3.h[0] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n" "fmla v28.8h, v18.8h, v2.h[3] \n" "fmla v29.8h, v18.8h, v2.h[5] \n" "fmla v30.8h, v18.8h, v2.h[7] \n" "fmla v31.8h, v18.8h, v3.h[1] \n" "fmla v28.8h, v19.8h, v2.h[4] \n" "fmla v29.8h, v19.8h, v2.h[6] \n" "fmla v30.8h, v19.8h, v3.h[0] \n" "fmla v31.8h, v19.8h, v3.h[2] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.8h, v1.8h}, [%3] \n" // r2 "fmla v28.8h, v20.8h, v2.h[5] \n" "fmla v29.8h, v20.8h, v2.h[7] \n" "fmla v30.8h, v20.8h, v3.h[1] \n" "fmla v31.8h, v20.8h, v3.h[3] \n" "fmla v28.8h, v21.8h, v2.h[6] \n" "fmla v29.8h, v21.8h, v3.h[0] \n" "fmla v30.8h, v21.8h, v3.h[2] \n" "fmla v31.8h, v21.8h, v3.h[4] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n" "fmla v28.8h, v22.8h, v0.h[0] \n" "fmla v29.8h, v22.8h, v0.h[2] \n" "fmla v30.8h, v22.8h, v0.h[4] \n" "fmla v31.8h, v22.8h, v0.h[6] \n" "fmla v28.8h, v23.8h, v0.h[1] \n" "fmla v29.8h, v23.8h, v0.h[3] \n" "fmla v30.8h, v23.8h, v0.h[5] \n" "fmla v31.8h, v23.8h, v0.h[7] \n" "fmla v28.8h, v16.8h, v0.h[2] \n" "fmla v29.8h, v16.8h, v0.h[4] \n" "fmla v30.8h, v16.8h, v0.h[6] \n" "fmla v31.8h, v16.8h, v1.h[0] \n" "fmla v28.8h, v17.8h, v0.h[3] \n" "fmla v29.8h, v17.8h, v0.h[5] \n" "fmla v30.8h, v17.8h, v0.h[7] \n" "fmla v31.8h, v17.8h, v1.h[1] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n" "fmla v28.8h, v18.8h, v0.h[4] \n" "fmla v29.8h, v18.8h, v0.h[6] \n" "fmla v30.8h, v18.8h, v1.h[0] \n" "fmla v31.8h, v18.8h, v1.h[2] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v2.8h, v3.8h}, [%4] \n" // r3 "fmla v28.8h, v19.8h, v0.h[5] \n" "fmla v29.8h, v19.8h, v0.h[7] \n" "fmla v30.8h, v19.8h, v1.h[1] \n" "fmla v31.8h, v19.8h, v1.h[3] \n" "fmla v28.8h, v20.8h, 
v0.h[6] \n" "fmla v29.8h, v20.8h, v1.h[0] \n" "fmla v30.8h, v20.8h, v1.h[2] \n" "fmla v31.8h, v20.8h, v1.h[4] \n" "fmla v28.8h, v21.8h, v2.h[0] \n" "fmla v29.8h, v21.8h, v2.h[2] \n" "fmla v30.8h, v21.8h, v2.h[4] \n" "fmla v31.8h, v21.8h, v2.h[6] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n" "fmla v28.8h, v22.8h, v2.h[1] \n" "fmla v29.8h, v22.8h, v2.h[3] \n" "fmla v30.8h, v22.8h, v2.h[5] \n" "fmla v31.8h, v22.8h, v2.h[7] \n" "fmla v28.8h, v23.8h, v2.h[2] \n" "fmla v29.8h, v23.8h, v2.h[4] \n" "fmla v30.8h, v23.8h, v2.h[6] \n" "fmla v31.8h, v23.8h, v3.h[0] \n" "fmla v28.8h, v16.8h, v2.h[3] \n" "fmla v29.8h, v16.8h, v2.h[5] \n" "fmla v30.8h, v16.8h, v2.h[7] \n" "fmla v31.8h, v16.8h, v3.h[1] \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.8h, v1.8h}, [%5] \n" // r4 "fmla v28.8h, v17.8h, v2.h[4] \n" "fmla v29.8h, v17.8h, v2.h[6] \n" "fmla v30.8h, v17.8h, v3.h[0] \n" "fmla v31.8h, v17.8h, v3.h[2] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n" "fmla v28.8h, v18.8h, v2.h[5] \n" "fmla v29.8h, v18.8h, v2.h[7] \n" "fmla v30.8h, v18.8h, v3.h[1] \n" "fmla v31.8h, v18.8h, v3.h[3] \n" "fmla v28.8h, v19.8h, v2.h[6] \n" "fmla v29.8h, v19.8h, v3.h[0] \n" "fmla v30.8h, v19.8h, v3.h[2] \n" "fmla v31.8h, v19.8h, v3.h[4] \n" "fmla v28.8h, v20.8h, v0.h[0] \n" "fmla v29.8h, v20.8h, v0.h[2] \n" "fmla v30.8h, v20.8h, v0.h[4] \n" "fmla v31.8h, v20.8h, v0.h[6] \n" "fmla v28.8h, v21.8h, v0.h[1] \n" "fmla v29.8h, v21.8h, v0.h[3] \n" "fmla v30.8h, v21.8h, v0.h[5] \n" "fmla v31.8h, v21.8h, v0.h[7] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n" "fmla v28.8h, v22.8h, v0.h[2] \n" "fmla v29.8h, v22.8h, v0.h[4] \n" "fmla v30.8h, v22.8h, v0.h[6] \n" "fmla v31.8h, v22.8h, v1.h[0] \n" "fmla v28.8h, v23.8h, v0.h[3] \n" "fmla v29.8h, v23.8h, v0.h[5] \n" "fmla v30.8h, v23.8h, v0.h[7] \n" "fmla v31.8h, v23.8h, v1.h[1] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v2.8h, v3.8h}, [%6] \n" // r5 "fmla v28.8h, v16.8h, v0.h[4] \n" "fmla v29.8h, v16.8h, v0.h[6] \n" "fmla v30.8h, v16.8h, v1.h[0] \n" "fmla v31.8h, v16.8h, v1.h[2] \n" "fmla v28.8h, v17.8h, v0.h[5] \n" "fmla v29.8h, v17.8h, v0.h[7] \n" "fmla v30.8h, v17.8h, v1.h[1] \n" "fmla v31.8h, v17.8h, v1.h[3] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n" "fmla v28.8h, v18.8h, v0.h[6] \n" "fmla v29.8h, v18.8h, v1.h[0] \n" "fmla v30.8h, v18.8h, v1.h[2] \n" "fmla v31.8h, v18.8h, v1.h[4] \n" "fmla v28.8h, v19.8h, v2.h[0] \n" "fmla v29.8h, v19.8h, v2.h[2] \n" "fmla v30.8h, v19.8h, v2.h[4] \n" "fmla v31.8h, v19.8h, v2.h[6] \n" "fmla v28.8h, v20.8h, v2.h[1] \n" "fmla v29.8h, v20.8h, v2.h[3] \n" "fmla v30.8h, v20.8h, v2.h[5] \n" "fmla v31.8h, v20.8h, v2.h[7] \n" "fmla v28.8h, v21.8h, v2.h[2] \n" "fmla v29.8h, v21.8h, v2.h[4] \n" "fmla v30.8h, v21.8h, v2.h[6] \n" "fmla v31.8h, v21.8h, v3.h[0] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n" "fmla v28.8h, v22.8h, v2.h[3] \n" "fmla v29.8h, v22.8h, v2.h[5] \n" "fmla v30.8h, v22.8h, v2.h[7] \n" "fmla v31.8h, v22.8h, v3.h[1] \n" "add %1, %1, #16 \n" "fmla v28.8h, v23.8h, v2.h[4] \n" "fmla v29.8h, v23.8h, v2.h[6] \n" "fmla v30.8h, v23.8h, v3.h[0] \n" "fmla v31.8h, v23.8h, v3.h[2] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v0.8h, v1.8h}, [%7] \n" // r6 "fmla v28.8h, v16.8h, v2.h[5] \n" "fmla v29.8h, v16.8h, v2.h[7] \n" "fmla v30.8h, v16.8h, v3.h[1] \n" "fmla v31.8h, v16.8h, v3.h[3] \n" "add %2, %2, #16 \n" "fmla v28.8h, v17.8h, v2.h[6] \n" "fmla v29.8h, 
v17.8h, v3.h[0] \n" "fmla v30.8h, v17.8h, v3.h[2] \n" "fmla v31.8h, v17.8h, v3.h[4] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n" "fmla v28.8h, v18.8h, v0.h[0] \n" "fmla v29.8h, v18.8h, v0.h[2] \n" "fmla v30.8h, v18.8h, v0.h[4] \n" "fmla v31.8h, v18.8h, v0.h[6] \n" "add %3, %3, #16 \n" "fmla v28.8h, v19.8h, v0.h[1] \n" "fmla v29.8h, v19.8h, v0.h[3] \n" "fmla v30.8h, v19.8h, v0.h[5] \n" "fmla v31.8h, v19.8h, v0.h[7] \n" "add %4, %4, #16 \n" "fmla v28.8h, v20.8h, v0.h[2] \n" "fmla v29.8h, v20.8h, v0.h[4] \n" "fmla v30.8h, v20.8h, v0.h[6] \n" "fmla v31.8h, v20.8h, v1.h[0] \n" "add %5, %5, #16 \n" "fmla v28.8h, v21.8h, v0.h[3] \n" "fmla v29.8h, v21.8h, v0.h[5] \n" "fmla v30.8h, v21.8h, v0.h[7] \n" "fmla v31.8h, v21.8h, v1.h[1] \n" "prfm pldl1keep, [%8, #128] \n" "ld1 {v16.8h}, [%8] \n" "fmla v28.8h, v22.8h, v0.h[4] \n" "fmla v29.8h, v22.8h, v0.h[6] \n" "fmla v30.8h, v22.8h, v1.h[0] \n" "fmla v31.8h, v22.8h, v1.h[2] \n" "add %6, %6, #16 \n" "fmla v28.8h, v23.8h, v0.h[5] \n" "fmla v29.8h, v23.8h, v0.h[7] \n" "fmla v30.8h, v23.8h, v1.h[1] \n" "fmla v31.8h, v23.8h, v1.h[3] \n" "add %7, %7, #16 \n" "fmla v28.8h, v16.8h, v0.h[6] \n" "fmla v29.8h, v16.8h, v1.h[0] \n" "fmla v30.8h, v16.8h, v1.h[2] \n" "fmla v31.8h, v16.8h, v1.h[4] \n" "sub %8, %8, #768 \n" // kptr -= 48 * 8; "st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4), // %5 "=r"(r5), // %6 "=r"(r6), // %7 "=r"(kptr) // %8 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "6"(r5), "7"(r6), "8"(kptr) : "memory", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31" ); } for (; j < outw; j++) { asm volatile( "prfm pldl1keep, [%1, #128] \n" "ld1 {v0.8h}, [%1] \n" // r0 "prfm pldl1keep, [%8, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v31.8h}, [%0] \n" // sum0 "fmul v28.8h, v16.8h, v0.h[0] \n" "fmul v29.8h, v17.8h, v0.h[1] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n" "fmul v30.8h, v18.8h, v0.h[2] \n" "fmla v31.8h, v19.8h, v0.h[3] \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v1.8h}, [%2] \n" // r1 "fmla v28.8h, v20.8h, v0.h[4] \n" "fmla v29.8h, v21.8h, v0.h[5] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n" "fmla v30.8h, v22.8h, v0.h[6] \n" "fmla v31.8h, v23.8h, v1.h[0] \n" "fmla v28.8h, v16.8h, v1.h[1] \n" "fmla v29.8h, v17.8h, v1.h[2] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n" "fmla v30.8h, v18.8h, v1.h[3] \n" "fmla v31.8h, v19.8h, v1.h[4] \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.8h}, [%3] \n" // r2 "fmla v28.8h, v20.8h, v1.h[5] \n" "fmla v29.8h, v21.8h, v1.h[6] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n" "fmla v30.8h, v22.8h, v0.h[0] \n" "fmla v31.8h, v23.8h, v0.h[1] \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v1.8h}, [%4] \n" // r3 "fmla v28.8h, v16.8h, v0.h[2] \n" "fmla v29.8h, v17.8h, v0.h[3] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n" "fmla v30.8h, v18.8h, v0.h[4] \n" "fmla v31.8h, v19.8h, v0.h[5] \n" "add %1, %1, #4 \n" "fmla v28.8h, v20.8h, v0.h[6] \n" "fmla v29.8h, v21.8h, v1.h[0] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n" "fmla v30.8h, v22.8h, v1.h[1] \n" "fmla v31.8h, v23.8h, v1.h[2] \n" "prfm pldl1keep, [%5, #128] \n" 
"ld1 {v0.8h}, [%5] \n" // r4 "fmla v28.8h, v16.8h, v1.h[3] \n" "fmla v29.8h, v17.8h, v1.h[4] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n" "fmla v30.8h, v18.8h, v1.h[5] \n" "fmla v31.8h, v19.8h, v1.h[6] \n" "add %2, %2, #4 \n" "fmla v28.8h, v20.8h, v0.h[0] \n" "fmla v29.8h, v21.8h, v0.h[1] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n" "fmla v30.8h, v22.8h, v0.h[2] \n" "fmla v31.8h, v23.8h, v0.h[3] \n" "prfm pldl1keep, [%6, #128] \n" "ld1 {v1.8h}, [%6] \n" // r5 "fmla v28.8h, v16.8h, v0.h[4] \n" "fmla v29.8h, v17.8h, v0.h[5] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n" "fmla v30.8h, v18.8h, v0.h[6] \n" "fmla v31.8h, v19.8h, v1.h[0] \n" "add %3, %3, #4 \n" "fmla v28.8h, v20.8h, v1.h[1] \n" "fmla v29.8h, v21.8h, v1.h[2] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%8], #64 \n" "fmla v30.8h, v22.8h, v1.h[3] \n" "fmla v31.8h, v23.8h, v1.h[4] \n" "prfm pldl1keep, [%7, #128] \n" "ld1 {v0.8h}, [%7] \n" // r6 "fmla v28.8h, v16.8h, v1.h[5] \n" "fmla v29.8h, v17.8h, v1.h[6] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%8], #64 \n" "fmla v30.8h, v18.8h, v0.h[0] \n" "fmla v31.8h, v19.8h, v0.h[1] \n" "add %4, %4, #4 \n" "fmla v28.8h, v20.8h, v0.h[2] \n" "fmla v29.8h, v21.8h, v0.h[3] \n" "prfm pldl1keep, [%8, #128] \n" "ld1 {v16.8h}, [%8] \n" "fmla v30.8h, v22.8h, v0.h[4] \n" "fmla v31.8h, v23.8h, v0.h[5] \n" "add %5, %5, #4 \n" "fmla v28.8h, v16.8h, v0.h[6] \n" "add %6, %6, #4 \n" "fadd v29.8h, v29.8h, v30.8h \n" "fadd v31.8h, v31.8h, v28.8h \n" "add %7, %7, #4 \n" "fadd v29.8h, v29.8h, v31.8h \n" "sub %8, %8, #768 \n" // kptr -= 48 * 8; "st1 {v29.8h}, [%0], #16 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4), // %5 "=r"(r5), // %6 "=r"(r6), // %7 "=r"(kptr) // %8 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "6"(r5), "7"(r6), "8"(kptr) : "memory", "v0", "v1", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31" ); } r0 += tailstep; r1 += tailstep; r2 += tailstep; r3 += tailstep; r4 += tailstep; r5 += tailstep; r6 += tailstep; } } } }
convolution_winograd_transform_pack8_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd63_transform_input_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt) { const int w = bottom_blob.w; const int h = bottom_blob.h; const int inch = bottom_blob.c; const int w_tiles = (w - 2) / 6; const int h_tiles = (h - 2) / 6; const int tiles = w_tiles * h_tiles; // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); __fp16 tmp[8][8][8]; // tile for (int i = 0; i < h_tiles; i++) { for (int j = 0; j < w_tiles; j++) { const __fp16* r0 = img0.row<const __fp16>(i * 6) + (j * 6) * 8; for (int m = 0; m < 8; m++) { float16x8_t _r00 = vld1q_f16(r0); float16x8_t _r01 = vld1q_f16(r0 + 8); float16x8_t _r02 = vld1q_f16(r0 + 16); float16x8_t _r03 = vld1q_f16(r0 + 24); float16x8_t _r04 = vld1q_f16(r0 + 32); float16x8_t _r05 = vld1q_f16(r0 + 40); float16x8_t _r06 = vld1q_f16(r0 + 48); float16x8_t _r07 = vld1q_f16(r0 + 56); float16x8_t _tmp0m = vfmaq_n_f16(vsubq_f16(_r00, _r06), vsubq_f16(_r04, _r02), 5.25f); float16x8_t _tmp7m = vfmaq_n_f16(vsubq_f16(_r07, _r01), vsubq_f16(_r03, _r05), 5.25f); vst1q_f16(tmp[0][m], _tmp0m); vst1q_f16(tmp[7][m], _tmp7m); float16x8_t _tmp12a = vfmsq_n_f16(vaddq_f16(_r02, _r06), _r04, 4.25f); float16x8_t _tmp12b = vfmsq_n_f16(vaddq_f16(_r01, _r05), _r03, 4.25f); float16x8_t _tmp1m = vaddq_f16(_tmp12a, _tmp12b); float16x8_t _tmp2m = vsubq_f16(_tmp12a, _tmp12b); vst1q_f16(tmp[1][m], _tmp1m); vst1q_f16(tmp[2][m], _tmp2m); float16x8_t _tmp34a = vfmsq_n_f16(vfmaq_n_f16(_r06, _r02, 0.25f), _r04, 1.25f); float16x8_t _tmp34b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_r01, 0.5f), _r03, 2.5f), _r05, 2.f); float16x8_t _tmp3m = vaddq_f16(_tmp34a, _tmp34b); float16x8_t _tmp4m = 
vsubq_f16(_tmp34a, _tmp34b); vst1q_f16(tmp[3][m], _tmp3m); vst1q_f16(tmp[4][m], _tmp4m); float16x8_t _tmp56a = vfmaq_n_f16(_r06, vfmsq_n_f16(_r02, _r04, 1.25f), 4.f); float16x8_t _tmp56b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_r01, 2.f), _r03, 2.5f), _r05, 0.5f); float16x8_t _tmp5m = vaddq_f16(_tmp56a, _tmp56b); float16x8_t _tmp6m = vsubq_f16(_tmp56a, _tmp56b); vst1q_f16(tmp[5][m], _tmp5m); vst1q_f16(tmp[6][m], _tmp6m); r0 += w * 8; } __fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tiles + j) * 8; __fp16* r0_tm_1 = r0_tm_0 + tiles * 8; __fp16* r0_tm_2 = r0_tm_0 + tiles * 16; __fp16* r0_tm_3 = r0_tm_0 + tiles * 24; __fp16* r0_tm_4 = r0_tm_0 + tiles * 32; __fp16* r0_tm_5 = r0_tm_0 + tiles * 40; __fp16* r0_tm_6 = r0_tm_0 + tiles * 48; __fp16* r0_tm_7 = r0_tm_0 + tiles * 56; for (int m = 0; m < 8; m++) { float16x8_t _tmp00 = vld1q_f16(tmp[m][0]); float16x8_t _tmp01 = vld1q_f16(tmp[m][1]); float16x8_t _tmp02 = vld1q_f16(tmp[m][2]); float16x8_t _tmp03 = vld1q_f16(tmp[m][3]); float16x8_t _tmp04 = vld1q_f16(tmp[m][4]); float16x8_t _tmp05 = vld1q_f16(tmp[m][5]); float16x8_t _tmp06 = vld1q_f16(tmp[m][6]); float16x8_t _tmp07 = vld1q_f16(tmp[m][7]); float16x8_t _r0tm0 = vfmaq_n_f16(vsubq_f16(_tmp00, _tmp06), vsubq_f16(_tmp04, _tmp02), 5.25f); float16x8_t _r0tm7 = vfmaq_n_f16(vsubq_f16(_tmp07, _tmp01), vsubq_f16(_tmp03, _tmp05), 5.25f); float16x8_t _tmp12a = vfmsq_n_f16(vaddq_f16(_tmp02, _tmp06), _tmp04, 4.25f); float16x8_t _tmp12b = vfmsq_n_f16(vaddq_f16(_tmp01, _tmp05), _tmp03, 4.25f); float16x8_t _r0tm1 = vaddq_f16(_tmp12a, _tmp12b); float16x8_t _r0tm2 = vsubq_f16(_tmp12a, _tmp12b); float16x8_t _tmp34a = vfmsq_n_f16(vfmaq_n_f16(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f); float16x8_t _tmp34b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f); float16x8_t _r0tm3 = vaddq_f16(_tmp34a, _tmp34b); float16x8_t _r0tm4 = vsubq_f16(_tmp34a, _tmp34b); float16x8_t _tmp56a = vfmaq_n_f16(_tmp06, vfmsq_n_f16(_tmp02, _tmp04, 1.25f), 4.f); float16x8_t _tmp56b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f); float16x8_t _r0tm5 = vaddq_f16(_tmp56a, _tmp56b); float16x8_t _r0tm6 = vsubq_f16(_tmp56a, _tmp56b); vst1q_f16(r0_tm_0, _r0tm0); vst1q_f16(r0_tm_1, _r0tm1); vst1q_f16(r0_tm_2, _r0tm2); vst1q_f16(r0_tm_3, _r0tm3); vst1q_f16(r0_tm_4, _r0tm4); vst1q_f16(r0_tm_5, _r0tm5); vst1q_f16(r0_tm_6, _r0tm6); vst1q_f16(r0_tm_7, _r0tm7); r0_tm_0 += tiles * 64; r0_tm_1 += tiles * 64; r0_tm_2 += tiles * 64; r0_tm_3 += tiles * 64; r0_tm_4 += tiles * 64; r0_tm_5 += tiles * 64; r0_tm_6 += tiles * 64; r0_tm_7 += tiles * 64; } } } } } static void conv3x3s1_winograd63_transform_output_pack8_fp16sa_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt) { const int outw = top_blob.w; const int outh = top_blob.h; const int outch = top_blob.c; const int w_tiles = outw / 6; const int h_tiles = outh / 6; const int tiles = w_tiles * h_tiles; const __fp16* biasptr = bias; // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + 
r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob.channel(p); float16x8_t _bias0 = biasptr ? vld1q_f16(biasptr + p * 8) : vdupq_n_f16(0.f); __fp16 tmp[6][8][8]; // tile for (int i = 0; i < h_tiles; i++) { for (int j = 0; j < w_tiles; j++) { const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tiles + j) * 8; const __fp16* output0_tm_1 = output0_tm_0 + tiles * 8; const __fp16* output0_tm_2 = output0_tm_0 + tiles * 16; const __fp16* output0_tm_3 = output0_tm_0 + tiles * 24; const __fp16* output0_tm_4 = output0_tm_0 + tiles * 32; const __fp16* output0_tm_5 = output0_tm_0 + tiles * 40; const __fp16* output0_tm_6 = output0_tm_0 + tiles * 48; const __fp16* output0_tm_7 = output0_tm_0 + tiles * 56; __fp16* output0 = out0.row<__fp16>(i * 6) + (j * 6) * 8; for (int m = 0; m < 8; m++) { float16x8_t _out0tm0 = vld1q_f16(output0_tm_0); float16x8_t _out0tm1 = vld1q_f16(output0_tm_1); float16x8_t _out0tm2 = vld1q_f16(output0_tm_2); float16x8_t _out0tm3 = vld1q_f16(output0_tm_3); float16x8_t _out0tm4 = vld1q_f16(output0_tm_4); float16x8_t _out0tm5 = vld1q_f16(output0_tm_5); float16x8_t _out0tm6 = vld1q_f16(output0_tm_6); float16x8_t _out0tm7 = vld1q_f16(output0_tm_7); float16x8_t _tmp024a = vaddq_f16(_out0tm1, _out0tm2); float16x8_t _tmp135a = vsubq_f16(_out0tm1, _out0tm2); float16x8_t _tmp024b = vaddq_f16(_out0tm3, _out0tm4); float16x8_t _tmp135b = vsubq_f16(_out0tm3, _out0tm4); float16x8_t _tmp024c = vaddq_f16(_out0tm5, _out0tm6); float16x8_t _tmp135c = vsubq_f16(_out0tm5, _out0tm6); float16x8_t _tmp0m = vaddq_f16(vaddq_f16(_out0tm0, _tmp024a), vfmaq_n_f16(_tmp024b, _tmp024c, 32.f)); float16x8_t _tmp2m = vfmaq_n_f16(vfmaq_n_f16(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f); float16x8_t _tmp4m = vfmaq_n_f16(vfmaq_n_f16(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f); vst1q_f16(tmp[0][m], _tmp0m); vst1q_f16(tmp[2][m], _tmp2m); vst1q_f16(tmp[4][m], _tmp4m); float16x8_t _tmp1m = vfmaq_n_f16(vfmaq_n_f16(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f); float16x8_t _tmp3m = vfmaq_n_f16(vfmaq_n_f16(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f); float16x8_t _tmp5m = vaddq_f16(vaddq_f16(_out0tm7, _tmp135a), vfmaq_n_f16(_tmp135c, _tmp135b, 32.f)); vst1q_f16(tmp[1][m], _tmp1m); vst1q_f16(tmp[3][m], _tmp3m); vst1q_f16(tmp[5][m], _tmp5m); output0_tm_0 += tiles * 64; output0_tm_1 += tiles * 64; output0_tm_2 += tiles * 64; output0_tm_3 += tiles * 64; output0_tm_4 += tiles * 64; output0_tm_5 += tiles * 64; output0_tm_6 += tiles * 64; output0_tm_7 += tiles * 64; } for (int m = 0; m < 6; m++) { float16x8_t _tmp00 = vld1q_f16(tmp[m][0]); float16x8_t _tmp01 = vld1q_f16(tmp[m][1]); float16x8_t _tmp02 = vld1q_f16(tmp[m][2]); float16x8_t _tmp03 = vld1q_f16(tmp[m][3]); float16x8_t _tmp04 = vld1q_f16(tmp[m][4]); float16x8_t _tmp05 = vld1q_f16(tmp[m][5]); float16x8_t _tmp06 = vld1q_f16(tmp[m][6]); float16x8_t _tmp07 = vld1q_f16(tmp[m][7]); float16x8_t _tmp024a = vaddq_f16(_tmp01, _tmp02); float16x8_t _tmp135a = vsubq_f16(_tmp01, _tmp02); float16x8_t _tmp024b = vaddq_f16(_tmp03, _tmp04); float16x8_t _tmp135b = vsubq_f16(_tmp03, _tmp04); float16x8_t _tmp024c = vaddq_f16(_tmp05, _tmp06); float16x8_t _tmp135c = vsubq_f16(_tmp05, _tmp06); float16x8_t _out00 = vaddq_f16(_bias0, vaddq_f16(vaddq_f16(_tmp00, _tmp024a), vfmaq_n_f16(_tmp024b, _tmp024c, 32.f))); float16x8_t _out02 = vaddq_f16(_bias0, vfmaq_n_f16(vfmaq_n_f16(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f)); float16x8_t _out04 = 
vaddq_f16(_bias0, vfmaq_n_f16(vfmaq_n_f16(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f)); vst1q_f16(output0, _out00); vst1q_f16(output0 + 16, _out02); vst1q_f16(output0 + 32, _out04); float16x8_t _out01 = vaddq_f16(_bias0, vfmaq_n_f16(vfmaq_n_f16(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f)); float16x8_t _out03 = vaddq_f16(_bias0, vfmaq_n_f16(vfmaq_n_f16(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f)); float16x8_t _out05 = vaddq_f16(_bias0, vaddq_f16(vaddq_f16(_tmp07, _tmp135a), vfmaq_n_f16(_tmp135c, _tmp135b, 32.f))); vst1q_f16(output0 + 8, _out01); vst1q_f16(output0 + 24, _out03); vst1q_f16(output0 + 40, _out05); output0 += outw * 8; } } } } } static void conv3x3s1_winograd43_transform_input_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt) { const int w = bottom_blob.w; const int h = bottom_blob.h; const int inch = bottom_blob.c; const int w_tiles = (w - 2) / 4; const int h_tiles = (h - 2) / 4; const int tiles = w_tiles * h_tiles; // const float itm[6][6] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r04 + r03 // 2 = 4 * (r01 - r02) + r04 - r03 // 3 = -2 * (r01 - r03) + r04 - r02 // 4 = 2 * (r01 - r03) + r04 - r02 // 5 = 4 * r01 - 5 * r03 + r05 #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); __fp16 tmp[6][6][8]; // tile for (int i = 0; i < h_tiles; i++) { for (int j = 0; j < w_tiles; j++) { const __fp16* r0 = img0.row<const __fp16>(i * 4) + (j * 4) * 8; for (int m = 0; m < 6; m++) { float16x8_t _r00 = vld1q_f16(r0); float16x8_t _r01 = vld1q_f16(r0 + 8); float16x8_t _r02 = vld1q_f16(r0 + 16); float16x8_t _r03 = vld1q_f16(r0 + 24); float16x8_t _r04 = vld1q_f16(r0 + 32); float16x8_t _r05 = vld1q_f16(r0 + 40); float16x8_t _tmp0m = vfmsq_n_f16(vfmaq_n_f16(_r04, _r00, 4.f), _r02, 5.f); float16x8_t _tmp1m = vfmsq_n_f16(vaddq_f16(_r04, _r03), vaddq_f16(_r01, _r02), 4.f); float16x8_t _tmp2m = vfmaq_n_f16(vsubq_f16(_r04, _r03), vsubq_f16(_r01, _r02), 4.f); float16x8_t _tmp3m = vfmsq_n_f16(vsubq_f16(_r04, _r02), vsubq_f16(_r01, _r03), 2.f); float16x8_t _tmp4m = vfmaq_n_f16(vsubq_f16(_r04, _r02), vsubq_f16(_r01, _r03), 2.f); float16x8_t _tmp5m = vfmsq_n_f16(vfmaq_n_f16(_r05, _r01, 4.f), _r03, 5.f); vst1q_f16(tmp[0][m], _tmp0m); vst1q_f16(tmp[1][m], _tmp1m); vst1q_f16(tmp[2][m], _tmp2m); vst1q_f16(tmp[3][m], _tmp3m); vst1q_f16(tmp[4][m], _tmp4m); vst1q_f16(tmp[5][m], _tmp5m); r0 += w * 8; } __fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tiles + j) * 8; __fp16* r0_tm_1 = r0_tm_0 + tiles * 8; __fp16* r0_tm_2 = r0_tm_0 + tiles * 16; __fp16* r0_tm_3 = r0_tm_0 + tiles * 24; __fp16* r0_tm_4 = r0_tm_0 + tiles * 32; __fp16* r0_tm_5 = r0_tm_0 + tiles * 40; for (int m = 0; m < 6; m++) { float16x8_t _tmp00 = vld1q_f16(tmp[m][0]); float16x8_t _tmp01 = vld1q_f16(tmp[m][1]); float16x8_t _tmp02 = vld1q_f16(tmp[m][2]); float16x8_t _tmp03 = vld1q_f16(tmp[m][3]); float16x8_t _tmp04 = vld1q_f16(tmp[m][4]); float16x8_t _tmp05 = vld1q_f16(tmp[m][5]); float16x8_t _r0tm0 = vfmsq_n_f16(vfmaq_n_f16(_tmp04, _tmp00, 4.f), _tmp02, 5.f); float16x8_t _r0tm1 = vfmsq_n_f16(vaddq_f16(_tmp04, _tmp03), vaddq_f16(_tmp01, _tmp02), 4.f); float16x8_t _r0tm2 = vfmaq_n_f16(vsubq_f16(_tmp04, _tmp03), vsubq_f16(_tmp01, _tmp02), 4.f); 
float16x8_t _r0tm3 = vfmsq_n_f16(vsubq_f16(_tmp04, _tmp02), vsubq_f16(_tmp01, _tmp03), 2.f); float16x8_t _r0tm4 = vfmaq_n_f16(vsubq_f16(_tmp04, _tmp02), vsubq_f16(_tmp01, _tmp03), 2.f); float16x8_t _r0tm5 = vfmsq_n_f16(vfmaq_n_f16(_tmp05, _tmp01, 4.f), _tmp03, 5.f); vst1q_f16(r0_tm_0, _r0tm0); vst1q_f16(r0_tm_1, _r0tm1); vst1q_f16(r0_tm_2, _r0tm2); vst1q_f16(r0_tm_3, _r0tm3); vst1q_f16(r0_tm_4, _r0tm4); vst1q_f16(r0_tm_5, _r0tm5); r0_tm_0 += tiles * 48; r0_tm_1 += tiles * 48; r0_tm_2 += tiles * 48; r0_tm_3 += tiles * 48; r0_tm_4 += tiles * 48; r0_tm_5 += tiles * 48; } } } } } static void conv3x3s1_winograd43_transform_output_pack8_fp16sa_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt) { const int outw = top_blob.w; const int outh = top_blob.h; const int outch = top_blob.c; const int w_tiles = outw / 4; const int h_tiles = outh / 4; const int tiles = w_tiles * h_tiles; const __fp16* biasptr = bias; // const float otm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + (r01 + r02) + (r03 + r04) // 1 = (r01 - r02) + (r03 - r04) * 2 // 2 = (r01 + r02) + (r03 + r04) * 4 // 3 = r05 + (r01 - r02) + (r03 - r04) * 8 #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob.channel(p); float16x8_t _bias0 = biasptr ? vld1q_f16(biasptr + p * 8) : vdupq_n_f16(0.f); __fp16 tmp[4][6][8]; // tile for (int i = 0; i < h_tiles; i++) { for (int j = 0; j < w_tiles; j++) { const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tiles + j) * 8; const __fp16* output0_tm_1 = output0_tm_0 + tiles * 8; const __fp16* output0_tm_2 = output0_tm_0 + tiles * 16; const __fp16* output0_tm_3 = output0_tm_0 + tiles * 24; const __fp16* output0_tm_4 = output0_tm_0 + tiles * 32; const __fp16* output0_tm_5 = output0_tm_0 + tiles * 40; __fp16* output0 = out0.row<__fp16>(i * 4) + (j * 4) * 8; for (int m = 0; m < 6; m++) { float16x8_t _out0tm0 = vld1q_f16(output0_tm_0); float16x8_t _out0tm1 = vld1q_f16(output0_tm_1); float16x8_t _out0tm2 = vld1q_f16(output0_tm_2); float16x8_t _out0tm3 = vld1q_f16(output0_tm_3); float16x8_t _out0tm4 = vld1q_f16(output0_tm_4); float16x8_t _out0tm5 = vld1q_f16(output0_tm_5); float16x8_t _tmp02a = vaddq_f16(_out0tm1, _out0tm2); float16x8_t _tmp13a = vsubq_f16(_out0tm1, _out0tm2); float16x8_t _tmp02b = vaddq_f16(_out0tm3, _out0tm4); float16x8_t _tmp13b = vsubq_f16(_out0tm3, _out0tm4); float16x8_t _tmp0m = vaddq_f16(vaddq_f16(_out0tm0, _tmp02a), _tmp02b); float16x8_t _tmp1m = vfmaq_n_f16(_tmp13a, _tmp13b, 2.f); float16x8_t _tmp2m = vfmaq_n_f16(_tmp02a, _tmp02b, 4.f); float16x8_t _tmp3m = vfmaq_n_f16(vaddq_f16(_out0tm5, _tmp13a), _tmp13b, 8.f); vst1q_f16(tmp[0][m], _tmp0m); vst1q_f16(tmp[1][m], _tmp1m); vst1q_f16(tmp[2][m], _tmp2m); vst1q_f16(tmp[3][m], _tmp3m); output0_tm_0 += tiles * 48; output0_tm_1 += tiles * 48; output0_tm_2 += tiles * 48; output0_tm_3 += tiles * 48; output0_tm_4 += tiles * 48; output0_tm_5 += tiles * 48; } for (int m = 0; m < 4; m++) { float16x8_t _tmp00 = vld1q_f16(tmp[m][0]); float16x8_t _tmp01 = vld1q_f16(tmp[m][1]); float16x8_t _tmp02 = vld1q_f16(tmp[m][2]); float16x8_t _tmp03 = vld1q_f16(tmp[m][3]); float16x8_t _tmp04 = vld1q_f16(tmp[m][4]); float16x8_t _tmp05 = vld1q_f16(tmp[m][5]); float16x8_t _tmp02a = vaddq_f16(_tmp01, _tmp02); float16x8_t _tmp13a = vsubq_f16(_tmp01, _tmp02); float16x8_t _tmp02b = 
vaddq_f16(_tmp03, _tmp04); float16x8_t _tmp13b = vsubq_f16(_tmp03, _tmp04); float16x8_t _out00 = vaddq_f16(_bias0, vaddq_f16(vaddq_f16(_tmp00, _tmp02a), _tmp02b)); float16x8_t _out01 = vaddq_f16(_bias0, vfmaq_n_f16(_tmp13a, _tmp13b, 2.f)); float16x8_t _out02 = vaddq_f16(_bias0, vfmaq_n_f16(_tmp02a, _tmp02b, 4.f)); float16x8_t _out03 = vaddq_f16(_bias0, vfmaq_n_f16(vaddq_f16(_tmp05, _tmp13a), _tmp13b, 8.f)); vst1q_f16(output0, _out00); vst1q_f16(output0 + 8, _out01); vst1q_f16(output0 + 16, _out02); vst1q_f16(output0 + 24, _out03); output0 += outw * 8; } } } } }
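/* A scalar restatement of the winograd43 input transform above: each 6x6
 * input tile d is multiplied on both sides by the commented itm[6][6] matrix,
 * first along rows (stored transposed into tmp, as in the intrinsics code),
 * then along columns. Plain float is used instead of float16x8_t lanes; this
 * is an illustrative sketch, not part of ncnn's API. */
static void winograd43_transform_input_tile_ref(const float d[6][6], float out[6][6])
{
    float tmp[6][6];
    for (int m = 0; m < 6; m++)
    {
        const float r0 = d[m][0], r1 = d[m][1], r2 = d[m][2];
        const float r3 = d[m][3], r4 = d[m][4], r5 = d[m][5];
        tmp[0][m] = 4.f * r0 - 5.f * r2 + r4;
        tmp[1][m] = (r4 + r3) - 4.f * (r1 + r2);
        tmp[2][m] = (r4 - r3) + 4.f * (r1 - r2);
        tmp[3][m] = (r4 - r2) - 2.f * (r1 - r3);
        tmp[4][m] = (r4 - r2) + 2.f * (r1 - r3);
        tmp[5][m] = 4.f * r1 - 5.f * r3 + r5;
    }
    for (int m = 0; m < 6; m++)
    {
        const float r0 = tmp[m][0], r1 = tmp[m][1], r2 = tmp[m][2];
        const float r3 = tmp[m][3], r4 = tmp[m][4], r5 = tmp[m][5];
        out[m][0] = 4.f * r0 - 5.f * r2 + r4;
        out[m][1] = (r4 + r3) - 4.f * (r1 + r2);
        out[m][2] = (r4 - r3) + 4.f * (r1 - r2);
        out[m][3] = (r4 - r2) - 2.f * (r1 - r3);
        out[m][4] = (r4 - r2) + 2.f * (r1 - r3);
        out[m][5] = 4.f * r1 - 5.f * r3 + r5;
    }
}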
analyze.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % AAA N N AAA L Y Y ZZZZZ EEEEE % % A A NN N A A L Y Y ZZ E % % AAAAA N N N AAAAA L Y ZZZ EEE % % A A N NN A A L Y ZZ E % % A A N N A A LLLLL Y ZZZZZ EEEEE % % % % Analyze An Image % % % % Software Design % % Bill Corbis % % December 1998 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % */ /* Include declarations. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <assert.h> #include <math.h> #include "magick/studio.h" #include "magick/MagickCore.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % a n a l y z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % analyzeImage() computes the brightness and saturation mean, standard % deviation, kurtosis and skewness and stores these values as attributes % of the image. % % The format of the analyzeImage method is: % % size_t analyzeImage(Image *images,const int argc,char **argv, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the address of a structure of type Image. % % o argc: Specifies a pointer to an integer describing the number of % elements in the argument vector. % % o argv: Specifies a pointer to a text array containing the command line % arguments. % % o exception: return any errors or warnings in this structure. % */ typedef struct _StatisticsInfo { double area, brightness, mean, standard_deviation, sum[5], kurtosis, skewness; } StatisticsInfo; static inline int GetMagickNumberThreads(const Image *source, const Image *destination,const size_t chunk,int multithreaded) { #define MagickMax(x,y) (((x) > (y)) ? (x) : (y)) #define MagickMin(x,y) (((x) < (y)) ? (x) : (y)) /* Number of threads bounded by the amount of work and any thread resource limit. The limit is 2 if the pixel cache type is not memory or memory-mapped. 
*/ if (multithreaded == 0) return(1); if (((GetImagePixelCacheType(source) != MemoryCache) && (GetImagePixelCacheType(source) != MapCache)) || ((GetImagePixelCacheType(destination) != MemoryCache) && (GetImagePixelCacheType(destination) != MapCache))) return(MagickMax(MagickMin(GetMagickResourceLimit(ThreadResource),2),1)); return(MagickMax(MagickMin((ssize_t) GetMagickResourceLimit(ThreadResource), (ssize_t) (chunk)/64),1)); } ModuleExport size_t analyzeImage(Image **images,const int argc, const char **argv,ExceptionInfo *exception) { #define AnalyzeImageFilterTag "Filter/Analyze" #define magick_number_threads(source,destination,chunk,multithreaded) \ num_threads(GetMagickNumberThreads(source,destination,chunk,multithreaded)) char text[MagickPathExtent]; Image *image; MagickBooleanType status; MagickOffsetType progress; assert(images != (Image **) NULL); assert(*images != (Image *) NULL); assert((*images)->signature == MagickCoreSignature); (void) argc; (void) argv; image=(*images); status=MagickTrue; progress=0; for ( ; image != (Image *) NULL; image=GetNextImageInList(image)) { CacheView *image_view; double area; ssize_t y; StatisticsInfo brightness, saturation; if (status == MagickFalse) continue; (void) memset(&brightness,0,sizeof(brightness)); (void) memset(&saturation,0,sizeof(saturation)); status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ shared(progress,status,brightness,saturation) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const PixelPacket *p; ssize_t i, x; StatisticsInfo local_brightness, local_saturation; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } (void) memset(&local_brightness,0,sizeof(local_brightness)); (void) memset(&local_saturation,0,sizeof(local_saturation)); for (x=0; x < (ssize_t) image->columns; x++) { double b, h, s; ConvertRGBToHSL(GetPixelRed(p),GetPixelGreen(p),GetPixelBlue(p), &h,&s,&b); b*=QuantumRange; for (i=1; i <= 4; i++) local_brightness.sum[i]+=pow(b,(double) i); s*=QuantumRange; for (i=1; i <= 4; i++) local_saturation.sum[i]+=pow(s,(double) i); p++; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (analyzeImage) #endif for (i=1; i <= 4; i++) { brightness.sum[i]+=local_brightness.sum[i]; saturation.sum[i]+=local_saturation.sum[i]; } } image_view=DestroyCacheView(image_view); area=(double) image->columns*image->rows; brightness.mean=brightness.sum[1]/area; (void) FormatLocaleString(text,MagickPathExtent,"%g",brightness.mean); (void) SetImageProperty(image,"filter:brightness:mean",text); brightness.standard_deviation=sqrt(brightness.sum[2]/area- (brightness.sum[1]/area*brightness.sum[1]/area)); (void) FormatLocaleString(text,MagickPathExtent,"%g", brightness.standard_deviation); (void) SetImageProperty(image,"filter:brightness:standard-deviation",text); if (fabs(brightness.standard_deviation) >= MagickEpsilon) brightness.kurtosis=(brightness.sum[4]/area-4.0*brightness.mean* brightness.sum[3]/area+6.0*brightness.mean*brightness.mean* brightness.sum[2]/area-3.0*brightness.mean*brightness.mean* brightness.mean*brightness.mean)/(brightness.standard_deviation* brightness.standard_deviation*brightness.standard_deviation* brightness.standard_deviation)-3.0; (void) FormatLocaleString(text,MagickPathExtent,"%g",brightness.kurtosis); (void) 
SetImageProperty(image,"filter:brightness:kurtosis",text); if (brightness.standard_deviation != 0) brightness.skewness=(brightness.sum[3]/area-3.0*brightness.mean* brightness.sum[2]/area+2.0*brightness.mean*brightness.mean* brightness.mean)/(brightness.standard_deviation* brightness.standard_deviation*brightness.standard_deviation); (void) FormatLocaleString(text,MagickPathExtent,"%g",brightness.skewness); (void) SetImageProperty(image,"filter:brightness:skewness",text); saturation.mean=saturation.sum[1]/area; (void) FormatLocaleString(text,MagickPathExtent,"%g",saturation.mean); (void) SetImageProperty(image,"filter:saturation:mean",text); saturation.standard_deviation=sqrt(saturation.sum[2]/area- (saturation.sum[1]/area*saturation.sum[1]/area)); (void) FormatLocaleString(text,MagickPathExtent,"%g", saturation.standard_deviation); (void) SetImageProperty(image,"filter:saturation:standard-deviation",text); if (fabs(saturation.standard_deviation) >= MagickEpsilon) saturation.kurtosis=(saturation.sum[4]/area-4.0*saturation.mean* saturation.sum[3]/area+6.0*saturation.mean*saturation.mean* saturation.sum[2]/area-3.0*saturation.mean*saturation.mean* saturation.mean*saturation.mean)/(saturation.standard_deviation* saturation.standard_deviation*saturation.standard_deviation* saturation.standard_deviation)-3.0; (void) FormatLocaleString(text,MagickPathExtent,"%g",saturation.kurtosis); (void) SetImageProperty(image,"filter:saturation:kurtosis",text); if (fabs(saturation.standard_deviation) >= MagickEpsilon) saturation.skewness=(saturation.sum[3]/area-3.0*saturation.mean* saturation.sum[2]/area+2.0*saturation.mean*saturation.mean* saturation.mean)/(saturation.standard_deviation* saturation.standard_deviation*saturation.standard_deviation); (void) FormatLocaleString(text,MagickPathExtent,"%g",saturation.skewness); (void) SetImageProperty(image,"filter:saturation:skewness",text); if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,AnalyzeImageFilterTag,progress, GetImageListLength(image)); if (proceed == MagickFalse) status=MagickFalse; } } return(MagickImageFilterSignature); }
lu.pluto_ancc.new_rtile.c
#include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) double L[N][N]; double U[N][N]; double A[N][N +13]; void init_arrays() { int i, j, k; /* have to initialize this matrix properly to prevent * division by zero */ for (i=0; i<N; i++) { for (j=0; j<N; j++) { L[i][j] = 0.0; U[i][j] = 0.0; } } for (i=0; i<N; i++) { for (j=0; j<=i; j++) { L[i][j] = i+j+1; U[j][i] = i+j+1; } } for (i=0; i<N; i++) { for (j=0; j<N; j++) { for (k=0; k<N; k++) { A[i][j] += L[i][k]*U[k][j]; } } } } double rtclock() { struct timezone tzp; struct timeval tp; int stat; gettimeofday (&tp, &tzp); return (tp.tv_sec + tp.tv_usec*1.0e-6); } int main() { init_arrays(); double annot_t_start=0, annot_t_end=0, annot_t_total=0; int annot_i; for (annot_i=0; annot_i<REPS; annot_i++) { annot_t_start = rtclock(); register int i,j,k; register int c1t, c2t, c3t, c4t, c5t, c6t, c7t, c8t, c9t, c10t, c11t, c12t; register int newlb_c1, newlb_c2, newlb_c3, newlb_c4, newlb_c5, newlb_c6, newlb_c7, newlb_c8, newlb_c9, newlb_c10, newlb_c11, newlb_c12; register int newub_c1, newub_c2, newub_c3, newub_c4, newub_c5, newub_c6, newub_c7, newub_c8, newub_c9, newub_c10, newub_c11, newub_c12; /*@ begin PolySyn( l1_tiles = [T1_1,T1_2,T1_3]; l2_tiles = [T2_1,T2_2,T2_3]; hotspot_permut = PERM_B; unroll_factors = [U1,U2,U3]; parallelize = PAR; scalar_replace = SCREP; icc_vectorize = IVEC; ) @*/ int c1, c2, c3, c4, c5, c6, c7, c8, c9; register int lb, ub, lb1, ub1, lb2, ub2; /* Generated from PLuTo-produced CLooG file by CLooG v0.14.1 64 bits in 2.33s. */ for (c1=-1;c1<=floord(3*N-5,128);c1++) { lb1=max(max(ceild(64*c1-N+2,64),ceild(32*c1-63,96)),0); ub1=min(floord(64*c1+63,64),floord(N-1,128)); #pragma omp parallel for shared(c1,lb1,ub1) private(c2,c3,c4,c5,c6,c7,c8,c9) for (c2=lb1; c2<=ub1; c2++) { for (c3=max(ceild(32*c1-32*c2-1953,2016),ceild(32*c1-32*c2-31,32));c3<=floord(N-1,64);c3++) { for (c4=max(max(0,2*c1-2*c2-64*c3-62),2*c1-2*c2);c4<=min(min(min(min(2*c1-2*c2+1,floord(992*c3+961,16)),floord(N-2,32)),floord(64*c2+63,16)),floord(32*c3+31,16));c4++) { for (c5=max(max(ceild(16*c4-7,8),0),8*c2);c5<=min(8*c2+7,floord(N-1,16));c5++) { for (c6=max(max(max(max(ceild(16*c4-465,496),ceild(2*c1-2*c2-2*c3-c4-31,31)),ceild(-2*c1+2*c2+2*c3+c4-31,33)),2*c3),ceild(16*c4-15,16));c6<=min(2*c3+1,floord(N-1,32));c6++) { if ((c1 == c2+c3) && (c4 == c6)) { for (c7=max(0,32*c6);c7<=min(min(32*c6+30,N-2),16*c5+14);c7++) { for (c8=max(16*c5,c7+1);c8<=min(16*c5+15,N-1);c8++) { A[c7][c8]=A[c7][c8]/A[c7][c7] ; for (c9=c7+1;c9<=min(32*c6+31,N-1);c9++) { A[c9][c8]=A[c9][c8]-A[c9][c7]*A[c7][c8] ; } } } } /*@ begin Loop( transform Composite( regtile = (['c7', 'c8', 'c9'],[32, 8, 1]), permut = [(['c7'],['c8'],['c9'])], scalarreplace = (True, 'double'), vector = (True, ['ivdep','vector always'])) for (c7=max(32*c4,0);c7<=min(min(32*c6-1,16*c5+14),32*c4+31);c7++) { for (c8=max(c7+1,16*c5);c8<=min(16*c5+15,N-1);c8++) for (c9=32*c6;c9<=min(N-1,32*c6+31);c9++) { A[c9][c8]=A[c9][c8]-A[c9][c7]*A[c7][c8] ; } } ) @*/{ for (c7t=max(32*c4,0); c7t<=min(min(32*c6-1,16*c5+14),32*c4+31)-31; c7t=c7t+32) { newlb_c8=-2147483648; newub_c8=min(16*c5+15,N-1); register int cbv_1; cbv_1=c7t+31; #pragma ivdep #pragma vector always for (c7=c7t; c7<=cbv_1; c7=c7+1) { newlb_c8=max(newlb_c8,max(c7+1,16*c5)); } for (c7=c7t; c7<=c7t+31; c7=c7+1) { for (c8=max(c7+1,16*c5); c8<=newlb_c8-1; c8=c8+1) 
{ register int cbv_2, cbv_3; cbv_2=32*c6; cbv_3=min(N-1,32*c6+31); #pragma ivdep #pragma vector always for (c9=cbv_2; c9<=cbv_3; c9++ ) { double scv_1; scv_1=A[c9][c8]; scv_1=scv_1-A[c9][c7]*A[c7][c8]; A[c9][c8]=scv_1; } } } for (c8t=newlb_c8; c8t<=newub_c8-7; c8t=c8t+8) { register int cbv_4, cbv_5; cbv_4=32*c6; cbv_5=min(N-1,32*c6+31); #pragma ivdep #pragma vector always for (c9=cbv_4; c9<=cbv_5; c9++ ) { double scv_2, scv_3, scv_4, scv_5, scv_6, scv_7, scv_8, scv_9; double scv_10, scv_11, scv_12, scv_13, scv_14, scv_15, scv_16, scv_17; double scv_18, scv_19, scv_20, scv_21, scv_22, scv_23, scv_24, scv_25; double scv_26, scv_27, scv_28, scv_29, scv_30, scv_31, scv_32, scv_33; double scv_34, scv_35, scv_36, scv_37, scv_38, scv_39, scv_40, scv_41; scv_2=A[c9][(c7t+31)]; scv_3=A[c9][c7t]; scv_4=A[c9][(c7t+30)]; scv_5=A[c9][(c8t+7)]; scv_6=A[c9][(c7t+20)]; scv_7=A[c9][(c8t+5)]; scv_8=A[c9][(c7t+19)]; scv_9=A[c9][(c7t+27)]; scv_10=A[c9][(c7t+2)]; scv_11=A[c9][(c8t+1)]; scv_12=A[c9][(c7t+22)]; scv_13=A[c9][(c7t+5)]; scv_14=A[c9][(c7t+11)]; scv_15=A[c9][(c7t+6)]; scv_16=A[c9][(c7t+15)]; scv_17=A[c9][(c7t+1)]; scv_18=A[c9][(c7t+21)]; scv_19=A[c9][(c7t+9)]; scv_20=A[c9][(c7t+16)]; scv_21=A[c9][(c7t+12)]; scv_22=A[c9][(c7t+17)]; scv_23=A[c9][(c8t+2)]; scv_24=A[c9][c8t]; scv_25=A[c9][(c7t+23)]; scv_26=A[c9][(c7t+3)]; scv_27=A[c9][(c8t+4)]; scv_28=A[c9][(c7t+4)]; scv_29=A[c9][(c7t+25)]; scv_30=A[c9][(c8t+6)]; scv_31=A[c9][(c7t+18)]; scv_32=A[c9][(c7t+7)]; scv_33=A[c9][(c7t+14)]; scv_34=A[c9][(c7t+26)]; scv_35=A[c9][(c7t+8)]; scv_36=A[c9][(c7t+24)]; scv_37=A[c9][(c7t+29)]; scv_38=A[c9][(c7t+28)]; scv_39=A[c9][(c7t+13)]; scv_40=A[c9][(c8t+3)]; scv_41=A[c9][(c7t+10)]; scv_24=scv_24-scv_3*A[c7t][c8t]; scv_11=scv_11-scv_3*A[c7t][(c8t+1)]; scv_23=scv_23-scv_3*A[c7t][(c8t+2)]; scv_40=scv_40-scv_3*A[c7t][(c8t+3)]; scv_27=scv_27-scv_3*A[c7t][(c8t+4)]; scv_7=scv_7-scv_3*A[c7t][(c8t+5)]; scv_30=scv_30-scv_3*A[c7t][(c8t+6)]; scv_5=scv_5-scv_3*A[c7t][(c8t+7)]; scv_24=scv_24-scv_17*A[(c7t+1)][c8t]; scv_11=scv_11-scv_17*A[(c7t+1)][(c8t+1)]; scv_23=scv_23-scv_17*A[(c7t+1)][(c8t+2)]; scv_40=scv_40-scv_17*A[(c7t+1)][(c8t+3)]; scv_27=scv_27-scv_17*A[(c7t+1)][(c8t+4)]; scv_7=scv_7-scv_17*A[(c7t+1)][(c8t+5)]; scv_30=scv_30-scv_17*A[(c7t+1)][(c8t+6)]; scv_5=scv_5-scv_17*A[(c7t+1)][(c8t+7)]; scv_24=scv_24-scv_10*A[(c7t+2)][c8t]; scv_11=scv_11-scv_10*A[(c7t+2)][(c8t+1)]; scv_23=scv_23-scv_10*A[(c7t+2)][(c8t+2)]; scv_40=scv_40-scv_10*A[(c7t+2)][(c8t+3)]; scv_27=scv_27-scv_10*A[(c7t+2)][(c8t+4)]; scv_7=scv_7-scv_10*A[(c7t+2)][(c8t+5)]; scv_30=scv_30-scv_10*A[(c7t+2)][(c8t+6)]; scv_5=scv_5-scv_10*A[(c7t+2)][(c8t+7)]; scv_24=scv_24-scv_26*A[(c7t+3)][c8t]; scv_11=scv_11-scv_26*A[(c7t+3)][(c8t+1)]; scv_23=scv_23-scv_26*A[(c7t+3)][(c8t+2)]; scv_40=scv_40-scv_26*A[(c7t+3)][(c8t+3)]; scv_27=scv_27-scv_26*A[(c7t+3)][(c8t+4)]; scv_7=scv_7-scv_26*A[(c7t+3)][(c8t+5)]; scv_30=scv_30-scv_26*A[(c7t+3)][(c8t+6)]; scv_5=scv_5-scv_26*A[(c7t+3)][(c8t+7)]; scv_24=scv_24-scv_28*A[(c7t+4)][c8t]; scv_11=scv_11-scv_28*A[(c7t+4)][(c8t+1)]; scv_23=scv_23-scv_28*A[(c7t+4)][(c8t+2)]; scv_40=scv_40-scv_28*A[(c7t+4)][(c8t+3)]; scv_27=scv_27-scv_28*A[(c7t+4)][(c8t+4)]; scv_7=scv_7-scv_28*A[(c7t+4)][(c8t+5)]; scv_30=scv_30-scv_28*A[(c7t+4)][(c8t+6)]; scv_5=scv_5-scv_28*A[(c7t+4)][(c8t+7)]; scv_24=scv_24-scv_13*A[(c7t+5)][c8t]; scv_11=scv_11-scv_13*A[(c7t+5)][(c8t+1)]; scv_23=scv_23-scv_13*A[(c7t+5)][(c8t+2)]; scv_40=scv_40-scv_13*A[(c7t+5)][(c8t+3)]; scv_27=scv_27-scv_13*A[(c7t+5)][(c8t+4)]; scv_7=scv_7-scv_13*A[(c7t+5)][(c8t+5)]; 
scv_30=scv_30-scv_13*A[(c7t+5)][(c8t+6)]; scv_5=scv_5-scv_13*A[(c7t+5)][(c8t+7)]; scv_24=scv_24-scv_15*A[(c7t+6)][c8t]; scv_11=scv_11-scv_15*A[(c7t+6)][(c8t+1)]; scv_23=scv_23-scv_15*A[(c7t+6)][(c8t+2)]; scv_40=scv_40-scv_15*A[(c7t+6)][(c8t+3)]; scv_27=scv_27-scv_15*A[(c7t+6)][(c8t+4)]; scv_7=scv_7-scv_15*A[(c7t+6)][(c8t+5)]; scv_30=scv_30-scv_15*A[(c7t+6)][(c8t+6)]; scv_5=scv_5-scv_15*A[(c7t+6)][(c8t+7)]; scv_24=scv_24-scv_32*A[(c7t+7)][c8t]; scv_11=scv_11-scv_32*A[(c7t+7)][(c8t+1)]; scv_23=scv_23-scv_32*A[(c7t+7)][(c8t+2)]; scv_40=scv_40-scv_32*A[(c7t+7)][(c8t+3)]; scv_27=scv_27-scv_32*A[(c7t+7)][(c8t+4)]; scv_7=scv_7-scv_32*A[(c7t+7)][(c8t+5)]; scv_30=scv_30-scv_32*A[(c7t+7)][(c8t+6)]; scv_5=scv_5-scv_32*A[(c7t+7)][(c8t+7)]; scv_24=scv_24-scv_35*A[(c7t+8)][c8t]; scv_11=scv_11-scv_35*A[(c7t+8)][(c8t+1)]; scv_23=scv_23-scv_35*A[(c7t+8)][(c8t+2)]; scv_40=scv_40-scv_35*A[(c7t+8)][(c8t+3)]; scv_27=scv_27-scv_35*A[(c7t+8)][(c8t+4)]; scv_7=scv_7-scv_35*A[(c7t+8)][(c8t+5)]; scv_30=scv_30-scv_35*A[(c7t+8)][(c8t+6)]; scv_5=scv_5-scv_35*A[(c7t+8)][(c8t+7)]; scv_24=scv_24-scv_19*A[(c7t+9)][c8t]; scv_11=scv_11-scv_19*A[(c7t+9)][(c8t+1)]; scv_23=scv_23-scv_19*A[(c7t+9)][(c8t+2)]; scv_40=scv_40-scv_19*A[(c7t+9)][(c8t+3)]; scv_27=scv_27-scv_19*A[(c7t+9)][(c8t+4)]; scv_7=scv_7-scv_19*A[(c7t+9)][(c8t+5)]; scv_30=scv_30-scv_19*A[(c7t+9)][(c8t+6)]; scv_5=scv_5-scv_19*A[(c7t+9)][(c8t+7)]; scv_24=scv_24-scv_41*A[(c7t+10)][c8t]; scv_11=scv_11-scv_41*A[(c7t+10)][(c8t+1)]; scv_23=scv_23-scv_41*A[(c7t+10)][(c8t+2)]; scv_40=scv_40-scv_41*A[(c7t+10)][(c8t+3)]; scv_27=scv_27-scv_41*A[(c7t+10)][(c8t+4)]; scv_7=scv_7-scv_41*A[(c7t+10)][(c8t+5)]; scv_30=scv_30-scv_41*A[(c7t+10)][(c8t+6)]; scv_5=scv_5-scv_41*A[(c7t+10)][(c8t+7)]; scv_24=scv_24-scv_14*A[(c7t+11)][c8t]; scv_11=scv_11-scv_14*A[(c7t+11)][(c8t+1)]; scv_23=scv_23-scv_14*A[(c7t+11)][(c8t+2)]; scv_40=scv_40-scv_14*A[(c7t+11)][(c8t+3)]; scv_27=scv_27-scv_14*A[(c7t+11)][(c8t+4)]; scv_7=scv_7-scv_14*A[(c7t+11)][(c8t+5)]; scv_30=scv_30-scv_14*A[(c7t+11)][(c8t+6)]; scv_5=scv_5-scv_14*A[(c7t+11)][(c8t+7)]; scv_24=scv_24-scv_21*A[(c7t+12)][c8t]; scv_11=scv_11-scv_21*A[(c7t+12)][(c8t+1)]; scv_23=scv_23-scv_21*A[(c7t+12)][(c8t+2)]; scv_40=scv_40-scv_21*A[(c7t+12)][(c8t+3)]; scv_27=scv_27-scv_21*A[(c7t+12)][(c8t+4)]; scv_7=scv_7-scv_21*A[(c7t+12)][(c8t+5)]; scv_30=scv_30-scv_21*A[(c7t+12)][(c8t+6)]; scv_5=scv_5-scv_21*A[(c7t+12)][(c8t+7)]; scv_24=scv_24-scv_39*A[(c7t+13)][c8t]; scv_11=scv_11-scv_39*A[(c7t+13)][(c8t+1)]; scv_23=scv_23-scv_39*A[(c7t+13)][(c8t+2)]; scv_40=scv_40-scv_39*A[(c7t+13)][(c8t+3)]; scv_27=scv_27-scv_39*A[(c7t+13)][(c8t+4)]; scv_7=scv_7-scv_39*A[(c7t+13)][(c8t+5)]; scv_30=scv_30-scv_39*A[(c7t+13)][(c8t+6)]; scv_5=scv_5-scv_39*A[(c7t+13)][(c8t+7)]; scv_24=scv_24-scv_33*A[(c7t+14)][c8t]; scv_11=scv_11-scv_33*A[(c7t+14)][(c8t+1)]; scv_23=scv_23-scv_33*A[(c7t+14)][(c8t+2)]; scv_40=scv_40-scv_33*A[(c7t+14)][(c8t+3)]; scv_27=scv_27-scv_33*A[(c7t+14)][(c8t+4)]; scv_7=scv_7-scv_33*A[(c7t+14)][(c8t+5)]; scv_30=scv_30-scv_33*A[(c7t+14)][(c8t+6)]; scv_5=scv_5-scv_33*A[(c7t+14)][(c8t+7)]; scv_24=scv_24-scv_16*A[(c7t+15)][c8t]; scv_11=scv_11-scv_16*A[(c7t+15)][(c8t+1)]; scv_23=scv_23-scv_16*A[(c7t+15)][(c8t+2)]; scv_40=scv_40-scv_16*A[(c7t+15)][(c8t+3)]; scv_27=scv_27-scv_16*A[(c7t+15)][(c8t+4)]; scv_7=scv_7-scv_16*A[(c7t+15)][(c8t+5)]; scv_30=scv_30-scv_16*A[(c7t+15)][(c8t+6)]; scv_5=scv_5-scv_16*A[(c7t+15)][(c8t+7)]; scv_24=scv_24-scv_20*A[(c7t+16)][c8t]; scv_11=scv_11-scv_20*A[(c7t+16)][(c8t+1)]; scv_23=scv_23-scv_20*A[(c7t+16)][(c8t+2)]; 
scv_40=scv_40-scv_20*A[(c7t+16)][(c8t+3)]; scv_27=scv_27-scv_20*A[(c7t+16)][(c8t+4)]; scv_7=scv_7-scv_20*A[(c7t+16)][(c8t+5)]; scv_30=scv_30-scv_20*A[(c7t+16)][(c8t+6)]; scv_5=scv_5-scv_20*A[(c7t+16)][(c8t+7)]; scv_24=scv_24-scv_22*A[(c7t+17)][c8t]; scv_11=scv_11-scv_22*A[(c7t+17)][(c8t+1)]; scv_23=scv_23-scv_22*A[(c7t+17)][(c8t+2)]; scv_40=scv_40-scv_22*A[(c7t+17)][(c8t+3)]; scv_27=scv_27-scv_22*A[(c7t+17)][(c8t+4)]; scv_7=scv_7-scv_22*A[(c7t+17)][(c8t+5)]; scv_30=scv_30-scv_22*A[(c7t+17)][(c8t+6)]; scv_5=scv_5-scv_22*A[(c7t+17)][(c8t+7)]; scv_24=scv_24-scv_31*A[(c7t+18)][c8t]; scv_11=scv_11-scv_31*A[(c7t+18)][(c8t+1)]; scv_23=scv_23-scv_31*A[(c7t+18)][(c8t+2)]; scv_40=scv_40-scv_31*A[(c7t+18)][(c8t+3)]; scv_27=scv_27-scv_31*A[(c7t+18)][(c8t+4)]; scv_7=scv_7-scv_31*A[(c7t+18)][(c8t+5)]; scv_30=scv_30-scv_31*A[(c7t+18)][(c8t+6)]; scv_5=scv_5-scv_31*A[(c7t+18)][(c8t+7)]; scv_24=scv_24-scv_8*A[(c7t+19)][c8t]; scv_11=scv_11-scv_8*A[(c7t+19)][(c8t+1)]; scv_23=scv_23-scv_8*A[(c7t+19)][(c8t+2)]; scv_40=scv_40-scv_8*A[(c7t+19)][(c8t+3)]; scv_27=scv_27-scv_8*A[(c7t+19)][(c8t+4)]; scv_7=scv_7-scv_8*A[(c7t+19)][(c8t+5)]; scv_30=scv_30-scv_8*A[(c7t+19)][(c8t+6)]; scv_5=scv_5-scv_8*A[(c7t+19)][(c8t+7)]; scv_24=scv_24-scv_6*A[(c7t+20)][c8t]; scv_11=scv_11-scv_6*A[(c7t+20)][(c8t+1)]; scv_23=scv_23-scv_6*A[(c7t+20)][(c8t+2)]; scv_40=scv_40-scv_6*A[(c7t+20)][(c8t+3)]; scv_27=scv_27-scv_6*A[(c7t+20)][(c8t+4)]; scv_7=scv_7-scv_6*A[(c7t+20)][(c8t+5)]; scv_30=scv_30-scv_6*A[(c7t+20)][(c8t+6)]; scv_5=scv_5-scv_6*A[(c7t+20)][(c8t+7)]; scv_24=scv_24-scv_18*A[(c7t+21)][c8t]; scv_11=scv_11-scv_18*A[(c7t+21)][(c8t+1)]; scv_23=scv_23-scv_18*A[(c7t+21)][(c8t+2)]; scv_40=scv_40-scv_18*A[(c7t+21)][(c8t+3)]; scv_27=scv_27-scv_18*A[(c7t+21)][(c8t+4)]; scv_7=scv_7-scv_18*A[(c7t+21)][(c8t+5)]; scv_30=scv_30-scv_18*A[(c7t+21)][(c8t+6)]; scv_5=scv_5-scv_18*A[(c7t+21)][(c8t+7)]; scv_24=scv_24-scv_12*A[(c7t+22)][c8t]; scv_11=scv_11-scv_12*A[(c7t+22)][(c8t+1)]; scv_23=scv_23-scv_12*A[(c7t+22)][(c8t+2)]; scv_40=scv_40-scv_12*A[(c7t+22)][(c8t+3)]; scv_27=scv_27-scv_12*A[(c7t+22)][(c8t+4)]; scv_7=scv_7-scv_12*A[(c7t+22)][(c8t+5)]; scv_30=scv_30-scv_12*A[(c7t+22)][(c8t+6)]; scv_5=scv_5-scv_12*A[(c7t+22)][(c8t+7)]; scv_24=scv_24-scv_25*A[(c7t+23)][c8t]; scv_11=scv_11-scv_25*A[(c7t+23)][(c8t+1)]; scv_23=scv_23-scv_25*A[(c7t+23)][(c8t+2)]; scv_40=scv_40-scv_25*A[(c7t+23)][(c8t+3)]; scv_27=scv_27-scv_25*A[(c7t+23)][(c8t+4)]; scv_7=scv_7-scv_25*A[(c7t+23)][(c8t+5)]; scv_30=scv_30-scv_25*A[(c7t+23)][(c8t+6)]; scv_5=scv_5-scv_25*A[(c7t+23)][(c8t+7)]; scv_24=scv_24-scv_36*A[(c7t+24)][c8t]; scv_11=scv_11-scv_36*A[(c7t+24)][(c8t+1)]; scv_23=scv_23-scv_36*A[(c7t+24)][(c8t+2)]; scv_40=scv_40-scv_36*A[(c7t+24)][(c8t+3)]; scv_27=scv_27-scv_36*A[(c7t+24)][(c8t+4)]; scv_7=scv_7-scv_36*A[(c7t+24)][(c8t+5)]; scv_30=scv_30-scv_36*A[(c7t+24)][(c8t+6)]; scv_5=scv_5-scv_36*A[(c7t+24)][(c8t+7)]; scv_24=scv_24-scv_29*A[(c7t+25)][c8t]; scv_11=scv_11-scv_29*A[(c7t+25)][(c8t+1)]; scv_23=scv_23-scv_29*A[(c7t+25)][(c8t+2)]; scv_40=scv_40-scv_29*A[(c7t+25)][(c8t+3)]; scv_27=scv_27-scv_29*A[(c7t+25)][(c8t+4)]; scv_7=scv_7-scv_29*A[(c7t+25)][(c8t+5)]; scv_30=scv_30-scv_29*A[(c7t+25)][(c8t+6)]; scv_5=scv_5-scv_29*A[(c7t+25)][(c8t+7)]; scv_24=scv_24-scv_34*A[(c7t+26)][c8t]; scv_11=scv_11-scv_34*A[(c7t+26)][(c8t+1)]; scv_23=scv_23-scv_34*A[(c7t+26)][(c8t+2)]; scv_40=scv_40-scv_34*A[(c7t+26)][(c8t+3)]; scv_27=scv_27-scv_34*A[(c7t+26)][(c8t+4)]; scv_7=scv_7-scv_34*A[(c7t+26)][(c8t+5)]; scv_30=scv_30-scv_34*A[(c7t+26)][(c8t+6)]; scv_5=scv_5-scv_34*A[(c7t+26)][(c8t+7)]; 
scv_24=scv_24-scv_9*A[(c7t+27)][c8t]; scv_11=scv_11-scv_9*A[(c7t+27)][(c8t+1)]; scv_23=scv_23-scv_9*A[(c7t+27)][(c8t+2)]; scv_40=scv_40-scv_9*A[(c7t+27)][(c8t+3)]; scv_27=scv_27-scv_9*A[(c7t+27)][(c8t+4)]; scv_7=scv_7-scv_9*A[(c7t+27)][(c8t+5)]; scv_30=scv_30-scv_9*A[(c7t+27)][(c8t+6)]; scv_5=scv_5-scv_9*A[(c7t+27)][(c8t+7)]; scv_24=scv_24-scv_38*A[(c7t+28)][c8t]; scv_11=scv_11-scv_38*A[(c7t+28)][(c8t+1)]; scv_23=scv_23-scv_38*A[(c7t+28)][(c8t+2)]; scv_40=scv_40-scv_38*A[(c7t+28)][(c8t+3)]; scv_27=scv_27-scv_38*A[(c7t+28)][(c8t+4)]; scv_7=scv_7-scv_38*A[(c7t+28)][(c8t+5)]; scv_30=scv_30-scv_38*A[(c7t+28)][(c8t+6)]; scv_5=scv_5-scv_38*A[(c7t+28)][(c8t+7)]; scv_24=scv_24-scv_37*A[(c7t+29)][c8t]; scv_11=scv_11-scv_37*A[(c7t+29)][(c8t+1)]; scv_23=scv_23-scv_37*A[(c7t+29)][(c8t+2)]; scv_40=scv_40-scv_37*A[(c7t+29)][(c8t+3)]; scv_27=scv_27-scv_37*A[(c7t+29)][(c8t+4)]; scv_7=scv_7-scv_37*A[(c7t+29)][(c8t+5)]; scv_30=scv_30-scv_37*A[(c7t+29)][(c8t+6)]; scv_5=scv_5-scv_37*A[(c7t+29)][(c8t+7)]; scv_24=scv_24-scv_4*A[(c7t+30)][c8t]; scv_11=scv_11-scv_4*A[(c7t+30)][(c8t+1)]; scv_23=scv_23-scv_4*A[(c7t+30)][(c8t+2)]; scv_40=scv_40-scv_4*A[(c7t+30)][(c8t+3)]; scv_27=scv_27-scv_4*A[(c7t+30)][(c8t+4)]; scv_7=scv_7-scv_4*A[(c7t+30)][(c8t+5)]; scv_30=scv_30-scv_4*A[(c7t+30)][(c8t+6)]; scv_5=scv_5-scv_4*A[(c7t+30)][(c8t+7)]; scv_24=scv_24-scv_2*A[(c7t+31)][c8t]; scv_11=scv_11-scv_2*A[(c7t+31)][(c8t+1)]; scv_23=scv_23-scv_2*A[(c7t+31)][(c8t+2)]; scv_40=scv_40-scv_2*A[(c7t+31)][(c8t+3)]; scv_27=scv_27-scv_2*A[(c7t+31)][(c8t+4)]; scv_7=scv_7-scv_2*A[(c7t+31)][(c8t+5)]; scv_30=scv_30-scv_2*A[(c7t+31)][(c8t+6)]; scv_5=scv_5-scv_2*A[(c7t+31)][(c8t+7)]; A[c9][(c8t+7)]=scv_5; A[c9][(c8t+5)]=scv_7; A[c9][(c8t+1)]=scv_11; A[c9][(c8t+2)]=scv_23; A[c9][c8t]=scv_24; A[c9][(c8t+4)]=scv_27; A[c9][(c8t+6)]=scv_30; A[c9][(c8t+3)]=scv_40; } } for (c8=c8t; c8<=newub_c8; c8=c8+1) { register int cbv_6, cbv_7; cbv_6=32*c6; cbv_7=min(N-1,32*c6+31); #pragma ivdep #pragma vector always for (c9=cbv_6; c9<=cbv_7; c9++ ) { double scv_42; scv_42=A[c9][c8]; scv_42=scv_42-A[c9][c7t]*A[c7t][c8]; scv_42=scv_42-A[c9][(c7t+1)]*A[(c7t+1)][c8]; scv_42=scv_42-A[c9][(c7t+2)]*A[(c7t+2)][c8]; scv_42=scv_42-A[c9][(c7t+3)]*A[(c7t+3)][c8]; scv_42=scv_42-A[c9][(c7t+4)]*A[(c7t+4)][c8]; scv_42=scv_42-A[c9][(c7t+5)]*A[(c7t+5)][c8]; scv_42=scv_42-A[c9][(c7t+6)]*A[(c7t+6)][c8]; scv_42=scv_42-A[c9][(c7t+7)]*A[(c7t+7)][c8]; scv_42=scv_42-A[c9][(c7t+8)]*A[(c7t+8)][c8]; scv_42=scv_42-A[c9][(c7t+9)]*A[(c7t+9)][c8]; scv_42=scv_42-A[c9][(c7t+10)]*A[(c7t+10)][c8]; scv_42=scv_42-A[c9][(c7t+11)]*A[(c7t+11)][c8]; scv_42=scv_42-A[c9][(c7t+12)]*A[(c7t+12)][c8]; scv_42=scv_42-A[c9][(c7t+13)]*A[(c7t+13)][c8]; scv_42=scv_42-A[c9][(c7t+14)]*A[(c7t+14)][c8]; scv_42=scv_42-A[c9][(c7t+15)]*A[(c7t+15)][c8]; scv_42=scv_42-A[c9][(c7t+16)]*A[(c7t+16)][c8]; scv_42=scv_42-A[c9][(c7t+17)]*A[(c7t+17)][c8]; scv_42=scv_42-A[c9][(c7t+18)]*A[(c7t+18)][c8]; scv_42=scv_42-A[c9][(c7t+19)]*A[(c7t+19)][c8]; scv_42=scv_42-A[c9][(c7t+20)]*A[(c7t+20)][c8]; scv_42=scv_42-A[c9][(c7t+21)]*A[(c7t+21)][c8]; scv_42=scv_42-A[c9][(c7t+22)]*A[(c7t+22)][c8]; scv_42=scv_42-A[c9][(c7t+23)]*A[(c7t+23)][c8]; scv_42=scv_42-A[c9][(c7t+24)]*A[(c7t+24)][c8]; scv_42=scv_42-A[c9][(c7t+25)]*A[(c7t+25)][c8]; scv_42=scv_42-A[c9][(c7t+26)]*A[(c7t+26)][c8]; scv_42=scv_42-A[c9][(c7t+27)]*A[(c7t+27)][c8]; scv_42=scv_42-A[c9][(c7t+28)]*A[(c7t+28)][c8]; scv_42=scv_42-A[c9][(c7t+29)]*A[(c7t+29)][c8]; scv_42=scv_42-A[c9][(c7t+30)]*A[(c7t+30)][c8]; scv_42=scv_42-A[c9][(c7t+31)]*A[(c7t+31)][c8]; A[c9][c8]=scv_42; } } for 
(c7=c7t; c7<=c7t+31; c7=c7+1) { for (c8=newub_c8+1; c8<=min(16*c5+15,N-1); c8=c8+1) { register int cbv_8, cbv_9; cbv_8=32*c6; cbv_9=min(N-1,32*c6+31); #pragma ivdep #pragma vector always for (c9=cbv_8; c9<=cbv_9; c9++ ) { double scv_43; scv_43=A[c9][c8]; scv_43=scv_43-A[c9][c7]*A[c7][c8]; A[c9][c8]=scv_43; } } } } for (c7=c7t; c7<=min(min(32*c6-1,16*c5+14),32*c4+31); c7=c7+1) { for (c8t=max(c7+1,16*c5); c8t<=min(16*c5+15,N-1)-7; c8t=c8t+8) { register int cbv_10, cbv_11; cbv_10=32*c6; cbv_11=min(N-1,32*c6+31); #pragma ivdep #pragma vector always for (c9=cbv_10; c9<=cbv_11; c9++ ) { double scv_44, scv_45, scv_46, scv_47, scv_48, scv_49, scv_50, scv_51; double scv_52; scv_44=A[c9][(c8t+6)]; scv_45=A[c9][(c8t+4)]; scv_46=A[c9][(c8t+5)]; scv_47=A[c9][(c8t+2)]; scv_48=A[c9][c8t]; scv_49=A[c9][(c8t+3)]; scv_50=A[c9][c7]; scv_51=A[c9][(c8t+7)]; scv_52=A[c9][(c8t+1)]; scv_48=scv_48-scv_50*A[c7][c8t]; scv_52=scv_52-scv_50*A[c7][(c8t+1)]; scv_47=scv_47-scv_50*A[c7][(c8t+2)]; scv_49=scv_49-scv_50*A[c7][(c8t+3)]; scv_45=scv_45-scv_50*A[c7][(c8t+4)]; scv_46=scv_46-scv_50*A[c7][(c8t+5)]; scv_44=scv_44-scv_50*A[c7][(c8t+6)]; scv_51=scv_51-scv_50*A[c7][(c8t+7)]; A[c9][(c8t+6)]=scv_44; A[c9][(c8t+4)]=scv_45; A[c9][(c8t+5)]=scv_46; A[c9][(c8t+2)]=scv_47; A[c9][c8t]=scv_48; A[c9][(c8t+3)]=scv_49; A[c9][(c8t+7)]=scv_51; A[c9][(c8t+1)]=scv_52; } } for (c8=c8t; c8<=min(16*c5+15,N-1); c8=c8+1) { register int cbv_12, cbv_13; cbv_12=32*c6; cbv_13=min(N-1,32*c6+31); #pragma ivdep #pragma vector always for (c9=cbv_12; c9<=cbv_13; c9++ ) { double scv_53; scv_53=A[c9][c8]; scv_53=scv_53-A[c9][c7]*A[c7][c8]; A[c9][c8]=scv_53; } } } } /*@ end @*/ if ((c1 == c2+c3) && (-c4 == -c6) && (c4 <= min(floord(N-33,32),floord(16*c5-17,32)))) { for (c8=max(16*c5,32*c4+32);c8<=min(N-1,16*c5+15);c8++) { A[32*c4+31][c8]=A[32*c4+31][c8]/A[32*c4+31][32*c4+31] ; } } } } } } } } /* End of CLooG code */ /*@ end @*/ annot_t_end = rtclock(); annot_t_total += annot_t_end - annot_t_start; } annot_t_total = annot_t_total / REPS; printf("%f\n", annot_t_total); return ((int) A[0][0]); }
simd_utils_avx_int32.h
/* * Project : SIMD_Utils * Version : 0.2.2 * Author : JishinMaster * Licence : BSD-2 */ #pragma once #include <stdint.h> #include "immintrin.h" #ifdef __AVX2__ static inline void add256s(int32_t *src1, int32_t *src2, int32_t *dst, int len) { int stop_len = len / AVX_LEN_INT32; stop_len *= AVX_LEN_INT32; if (areAligned3((uintptr_t) (src1), (uintptr_t) (src2), (uintptr_t) (dst), AVX_LEN_BYTES)) { for (int i = 0; i < stop_len; i += AVX_LEN_INT32) { _mm256_store_si256((__m256i *) (dst + i), _mm256_add_epi32(_mm256_load_si256((__m256i *) (src1 + i)), _mm256_load_si256((__m256i *) (src2 + i)))); } } else { for (int i = 0; i < stop_len; i += AVX_LEN_INT32) { _mm256_storeu_si256((__m256i *) (dst + i), _mm256_add_epi32(_mm256_loadu_si256((__m256i *) (src1 + i)), _mm256_loadu_si256((__m256i *) (src2 + i)))); } } for (int i = stop_len; i < len; i++) { dst[i] = src1[i] + src2[i]; } } #if 0 //Work in progress static inline void mul256s(int32_t *src1, int32_t *src2, int32_t *dst, int len) { int stop_len = len / AVX_LEN_INT32; stop_len *= AVX_LEN_INT32; if (areAligned3((uintptr_t)(src1), (uintptr_t)(src2), (uintptr_t)(dst), AVX_LEN_BYTES)) { for (int i = 0; i < stop_len; i += AVX_LEN_INT32) { _mm256_store_si256((__m256i *) (dst + i), _mm256_mul_epi32(_mm256_load_si256((__m256i *) (src1 + i)), _mm256_load_si256((__m256i *) (src2 + i)))); } } else { for (int i = 0; i < stop_len; i += AVX_LEN_INT32) { _mm256_storeu_si256((__m256i *) (dst + i), _mm256_mul_epi32(_mm256_loadu_si256((__m256i *) (src1 + i)), _mm256_loadu_si256((__m256i *) (src2 + i)))); } } for (int i = stop_len; i < len; i++) { dst[i] = src1[i] * src2[i]; } } #endif static inline void sub256s(int32_t *src1, int32_t *src2, int32_t *dst, int len) { int stop_len = len / AVX_LEN_INT32; stop_len *= AVX_LEN_INT32; if (areAligned3((uintptr_t) (src1), (uintptr_t) (src2), (uintptr_t) (dst), AVX_LEN_BYTES)) { for (int i = 0; i < stop_len; i += AVX_LEN_INT32) { _mm256_store_si256((__m256i *) (dst + i), _mm256_sub_epi32(_mm256_load_si256((__m256i *) (src1 + i)), _mm256_load_si256((__m256i *) (src2 + i)))); } } else { for (int i = 0; i < stop_len; i += AVX_LEN_INT32) { _mm256_storeu_si256((__m256i *) (dst + i), _mm256_sub_epi32(_mm256_loadu_si256((__m256i *) (src1 + i)), _mm256_loadu_si256((__m256i *) (src2 + i)))); } } for (int i = stop_len; i < len; i++) { dst[i] = src1[i] - src2[i]; } } static inline void addc256s(int32_t *src, int32_t value, int32_t *dst, int len) { int stop_len = len / AVX_LEN_INT32; stop_len *= AVX_LEN_INT32; const v8si tmp = _mm256_set1_epi32(value); if (areAligned2((uintptr_t) (src), (uintptr_t) (dst), AVX_LEN_BYTES)) { for (int i = 0; i < stop_len; i += AVX_LEN_INT32) { _mm256_store_si256((__m256i *) (dst + i), _mm256_add_epi32(tmp, _mm256_load_si256((__m256i *) (src + i)))); } } else { for (int i = 0; i < stop_len; i += AVX_LEN_INT32) { _mm256_storeu_si256((__m256i *) (dst + i), _mm256_add_epi32(tmp, _mm256_loadu_si256((__m256i *) (src + i)))); } } for (int i = stop_len; i < len; i++) { dst[i] = src[i] + value; } } static inline void vectorSlope256s(int *dst, int len, int offset, int slope) { v8si coef = _mm256_set_epi32(7 * slope, 6 * slope, 5 * slope, 4 * slope, 3 * slope, 2 * slope, slope, 0); v8si slope16_vec = _mm256_set1_epi32(16 * slope); v8si curVal = _mm256_add_epi32(_mm256_set1_epi32(offset), coef); v8si curVal2 = _mm256_add_epi32(_mm256_set1_epi32(offset), coef); curVal2 = _mm256_add_epi32(curVal2, _mm256_set1_epi32(8 * slope)); int stop_len = len / (2 * AVX_LEN_INT32); stop_len *= (2 * AVX_LEN_INT32); if (((uintptr_t) 
(const void *) (dst) % AVX_LEN_BYTES) == 0) {
        _mm256_store_si256((__m256i *) (dst + 0), curVal);
        _mm256_store_si256((__m256i *) (dst + AVX_LEN_INT32), curVal2);
    } else {
        _mm256_storeu_si256((__m256i *) (dst + 0), curVal);
        _mm256_storeu_si256((__m256i *) (dst + AVX_LEN_INT32), curVal2);
    }

    if (((uintptr_t) (const void *) (dst) % AVX_LEN_BYTES) == 0) {
        for (int i = 2 * AVX_LEN_INT32; i < stop_len; i += 2 * AVX_LEN_INT32) {
            curVal = _mm256_add_epi32(curVal, slope16_vec);
            _mm256_store_si256((__m256i *) (dst + i), curVal);
            curVal2 = _mm256_add_epi32(curVal2, slope16_vec);
            _mm256_store_si256((__m256i *) (dst + i + AVX_LEN_INT32), curVal2);
        }
    } else {
        for (int i = 2 * AVX_LEN_INT32; i < stop_len; i += 2 * AVX_LEN_INT32) {
            curVal = _mm256_add_epi32(curVal, slope16_vec);
            _mm256_storeu_si256((__m256i *) (dst + i), curVal);
            curVal2 = _mm256_add_epi32(curVal2, slope16_vec);
            _mm256_storeu_si256((__m256i *) (dst + i + AVX_LEN_INT32), curVal2);
        }
    }

    for (int i = stop_len; i < len; i++) {
        dst[i] = offset + slope * i;
    }
}

// Experimental
static inline void copy256s(int32_t *src, int32_t *dst, int len)
{
    int stop_len = len / AVX_LEN_INT32;
    stop_len *= AVX_LEN_INT32;

#ifdef OMP
#pragma omp parallel for schedule(auto)
#endif
    for (int i = 0; i < stop_len; i += AVX_LEN_INT32) {
        _mm256_store_si256((__m256i *) (dst + i), _mm256_load_si256((__m256i *) (src + i)));
    }

    for (int i = stop_len; i < len; i++) {
        dst[i] = src[i];
    }
}

static inline void copy256s_2(int32_t *src, int32_t *dst, int len)
{
    int stop_len = len / (2 * AVX_LEN_INT32);
    stop_len *= (2 * AVX_LEN_INT32);

#ifdef OMP
#pragma omp parallel for schedule(auto)
#endif
    for (int i = 0; i < stop_len; i += 2 * AVX_LEN_INT32) {
        __m256i tmp1 = _mm256_load_si256((__m256i *) (src + i));
        __m256i tmp2 = _mm256_load_si256((__m256i *) (src + i + AVX_LEN_INT32));
        _mm256_store_si256((__m256i *) (dst + i), tmp1);
        _mm256_store_si256((__m256i *) (dst + i + AVX_LEN_INT32), tmp2);
    }

    for (int i = stop_len; i < len; i++) {
        dst[i] = src[i];
    }
}

static inline void fast_copy256s(int32_t *src, int32_t *dst, int len)
{
    int stop_len = len / AVX_LEN_INT32;
    stop_len *= AVX_LEN_INT32;

#ifdef OMP
#pragma omp parallel for schedule(auto)
#endif
    for (int i = 0; i < stop_len; i += AVX_LEN_INT32) {
        _mm256_stream_si256((__m256i *) (dst + i), _mm256_stream_load_si256((__m256i *) (src + i)));
    }
    _mm_mfence();

    for (int i = stop_len; i < len; i++) {
        dst[i] = src[i];
    }
}

static inline void fast_copy256s_2(int32_t *src, int32_t *dst, int len)
{
    int stop_len = len / (2 * AVX_LEN_INT32);
    stop_len *= (2 * AVX_LEN_INT32);

#ifdef OMP
#pragma omp parallel for schedule(auto)
#endif
    for (int i = 0; i < stop_len; i += 2 * AVX_LEN_INT32) {
        __m256i tmp1 = _mm256_stream_load_si256((__m256i *) (src + i));
        __m256i tmp2 = _mm256_stream_load_si256((__m256i *) (src + i + AVX_LEN_INT32));
        _mm256_stream_si256((__m256i *) (dst + i), tmp1);
        _mm256_stream_si256((__m256i *) (dst + i + AVX_LEN_INT32), tmp2);
    }
    _mm_mfence();

    for (int i = stop_len; i < len; i++) {
        dst[i] = src[i];
    }
}

static inline void fast_copy256s_4(int32_t *src, int32_t *dst, int len)
{
    int stop_len = len / (4 * AVX_LEN_INT32);
    stop_len *= (4 * AVX_LEN_INT32);

#ifdef OMP
#pragma omp parallel for schedule(auto)
#endif
    for (int i = 0; i < stop_len; i += 4 * AVX_LEN_INT32) {
        __m256i tmp1 = _mm256_stream_load_si256((__m256i *) (src + i));
        __m256i tmp2 = _mm256_stream_load_si256((__m256i *) (src + i + AVX_LEN_INT32));
        __m256i tmp3 = _mm256_stream_load_si256((__m256i *) (src + i + 2 * AVX_LEN_INT32));
        __m256i tmp4 = _mm256_stream_load_si256((__m256i *) (src + i + 3
* AVX_LEN_INT32)); _mm256_stream_si256((__m256i *) (dst + i), tmp1); _mm256_stream_si256((__m256i *) (dst + i + AVX_LEN_INT32), tmp2); _mm256_stream_si256((__m256i *) (dst + i + 2 * AVX_LEN_INT32), tmp3); _mm256_stream_si256((__m256i *) (dst + i + 3 * AVX_LEN_INT32), tmp4); } _mm_mfence(); for (int i = stop_len; i < len; i++) { dst[i] = src[i]; } } static inline __m256i _mm256_absdiff_epi16(__m256i a, __m256i b) { __m256i cmp, difab, difba; cmp = _mm256_cmpgt_epi16(a, b); difab = _mm256_sub_epi16(a, b); difba = _mm256_sub_epi16(b, a); difab = _mm256_and_si256(cmp, difab); difba = _mm256_andnot_si256(cmp, difba); return _mm256_or_si256(difab, difba); } static inline __m256i _mm256_absdiff_epi32(__m256i a, __m256i b) { __m256i cmp, difab, difba; cmp = _mm256_cmpgt_epi32(a, b); difab = _mm256_sub_epi32(a, b); difba = _mm256_sub_epi32(b, a); difab = _mm256_and_si256(cmp, difab); difba = _mm256_andnot_si256(cmp, difba); return _mm256_or_si256(difab, difba); } static inline __m256i _mm256_absdiff_epi8(__m256i a, __m256i b) { __m256i cmp, difab, difba; cmp = _mm256_cmpgt_epi8(a, b); difab = _mm256_sub_epi8(a, b); difba = _mm256_sub_epi8(b, a); difab = _mm256_and_si256(cmp, difab); difba = _mm256_andnot_si256(cmp, difba); return _mm256_or_si256(difab, difba); } static inline void absdiff16s_256s(int16_t *src1, int16_t *src2, int16_t *dst, int len) { int stop_len = len / AVX_LEN_INT16; stop_len *= AVX_LEN_INT16; if (areAligned3((uintptr_t) (src1), (uintptr_t) (src2), (uintptr_t) (dst), AVX_LEN_BYTES)) { for (int i = 0; i < stop_len; i += AVX_LEN_INT16) { __m256i a = _mm256_load_si256((__m256i *) (src1 + i)); __m256i b = _mm256_load_si256((__m256i *) (src2 + i)); _mm256_store_si256((__m256i *) (dst + i), _mm256_absdiff_epi16(a, b)); } } else { for (int i = 0; i < stop_len; i += AVX_LEN_INT16) { __m256i a = _mm256_loadu_si256((__m256i *) (src1 + i)); __m256i b = _mm256_loadu_si256((__m256i *) (src2 + i)); _mm256_storeu_si256((__m256i *) (dst + i), _mm256_absdiff_epi16(a, b)); } } for (int i = stop_len; i < len; i++) { dst[i] = abs(src1[i] - src2[i]); } } static inline void powerspect16s_256s_interleaved(complex16s_t *src, int32_t *dst, int len) { int stop_len = len / AVX_LEN_INT32; stop_len *= AVX_LEN_INT32; int j = 0; if (areAligned2((uintptr_t) (src), (uintptr_t) (dst), AVX_LEN_BYTES)) { for (int i = 0; i < stop_len; i += AVX_LEN_INT32) { __m256i reim = _mm256_load_si256((__m256i *) ((const int16_t *) src + j)); // print8i(reim); printf("\n"); _mm256_store_si256((__m256i *) (dst + i), _mm256_madd_epi16(reim, reim)); j += AVX_LEN_INT16; } } else { for (int i = 0; i < stop_len; i += AVX_LEN_INT32) { __m256i reim = _mm256_loadu_si256((__m256i *) ((const int16_t *) src + j)); _mm256_storeu_si256((__m256i *) (dst + i), _mm256_madd_epi16(reim, reim)); j += AVX_LEN_INT16; } } for (int i = stop_len; i < len; i++) { dst[i] = (int32_t) src[i].re * (int32_t) src[i].re + (int32_t) src[i].im * (int32_t) src[i].im; } } #endif
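
/*
 * A minimal usage sketch (not part of the original library), kept under
 * "#if 0" so it never builds by default. It assumes AVX_LEN_INT32 and the
 * areAligned* helpers come from the library's common header, and it
 * cross-checks add256s against a plain scalar loop, including the tail
 * elements when len is not a multiple of AVX_LEN_INT32.
 */
#if 0
#include <stdio.h>

int main(void)
{
    enum { N = 19 }; /* deliberately not a multiple of 8 to exercise the tail */
    int32_t a[N], b[N], c[N];

    for (int i = 0; i < N; i++) {
        a[i] = i;
        b[i] = 2 * i;
    }

    add256s(a, b, c, N); /* vector body plus scalar tail */

    for (int i = 0; i < N; i++) {
        if (c[i] != a[i] + b[i]) {
            printf("mismatch at %d\n", i);
            return 1;
        }
    }
    printf("add256s ok\n");
    return 0;
}
#endif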
DOMINOSEC8_fmt_plug.c
/* Cracks Notes/Domino 8+ H-hashes. Thanks to philsmd and atom, for publishing
 * the algorithm.
 *
 * This file is based on DOMINOSEC_fmt_plug.c (version 3).
 *
 * The following description of the algorithm was published by philsmd, on
 * hashcat forums.
 *
 * H-hashes depend on (both of) the older "dominosec" hash types:
 * Lotus Notes/Domino 5 aka SEC_pwddigest_V1, start ([0-F] and
 * Lotus Notes/Domino 6 aka SEC_pwddigest_V2, start (G
 *
 * You need to generate those digests/hashes first and continue with the new
 * algorithm starting from the encoded SEC_pwddigest_V2 hash.
 *
 * The encoding too is the same as for the other 2 algos, but the new hashes
 * (following SEC_pwddigest_V3) are longer and start with '(H'
 *
 * Furthermore, the hashes themselves encode the following information:
 * - 16-byte salt (first 5 needed for SEC_pwddigest_V2; SEC_pwddigest_V1 is unsalted)
 * - round number (length 10, in ASCII)
 * - 2 additional chars
 * - 8 bytes (a part of the) digest
 *
 * So we start by generating the SEC_pwddigest_V2 hash with the first 5 bytes
 * of the salt. After that, PBKDF2-HMAC-SHA1 is used with the now generated
 * "(G[known "small" salt]...)" hash as password, the full 16-byte salt and
 * the round number found in the encoded hash. From the full digest (output of
 * PBKDF2), only the first 8 bytes are then used in the final encoding step.
 * (An illustrative, standalone sketch of the base64 encoding step appears at
 * the end of this file.)
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_DOMINOSEC8;
#elif FMT_REGISTERS_H
john_register_one(&fmt_DOMINOSEC8);
#else

#include <ctype.h>
#include <string.h>
#ifdef DOMINOSEC_32BIT
#include "stdint.h"
#endif
#include "misc.h"
#include "formats.h"
#include "common.h"
#undef SIMD_COEF_32
#include "pbkdf2_hmac_sha1.h"

#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 128
#endif
#endif
#include "memdbg.h"

#define FORMAT_LABEL "dominosec8"
#define FORMAT_NAME "Lotus Notes/Domino 8"
#define ALGORITHM_NAME "8/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 64
#define CIPHERTEXT_LENGTH 51
#define BINARY_SIZE 8
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_SIZE sizeof(struct custom_salt)
#define REAL_SALT_SIZE 16
#define SALT_ALIGN sizeof(ARCH_WORD_32)
#define DIGEST_SIZE 16
#define BINARY_BUFFER_SIZE (DIGEST_SIZE-SALT_SIZE)
#define ASCII_DIGEST_LENGTH (DIGEST_SIZE*2)
#define MIN_KEYS_PER_CRYPT 3
#define MAX_KEYS_PER_CRYPT 6

static unsigned char (*digest34)[34];
static char (*saved_key)[PLAINTEXT_LENGTH+1];
static ARCH_WORD_32 (*crypt_out)[(DIGEST_SIZE + 3) / sizeof(ARCH_WORD_32)];
static ARCH_WORD_32 (*crypt_out_real)[(BINARY_SIZE) / sizeof(ARCH_WORD_32)];

static struct custom_salt {
	unsigned char salt[REAL_SALT_SIZE];
	int iterations;
	unsigned char chars[3];
} *cur_salt;

static int keys_changed, salt_changed;

static const char hex_table[][2] = {
	"00", "01", "02", "03", "04", "05", "06", "07", "08", "09", "0A", "0B", "0C", "0D", "0E", "0F",
	"10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "1A", "1B", "1C", "1D", "1E", "1F",
	"20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "2A", "2B", "2C", "2D", "2E", "2F",
	"30", "31", "32", "33", "34", "35", "36", "37", "38", "39", "3A", "3B", "3C", "3D", "3E", "3F",
	"40", "41", "42", "43", "44", "45", "46", "47", "48", "49", "4A", "4B", "4C", "4D", "4E", "4F",
	"50", "51", "52", "53", "54", "55", "56", "57", "58", "59", "5A", "5B", "5C", "5D", "5E", "5F",
	"60", "61", "62", "63", "64", "65", "66", "67", "68", "69", "6A", "6B", "6C", "6D", "6E", "6F",
	"70", "71", "72", "73", "74", "75", "76", "77", "78",
"79", "7A", "7B", "7C", "7D", "7E", "7F", "80", "81", "82", "83", "84", "85", "86", "87", "88", "89", "8A", "8B", "8C", "8D", "8E", "8F", "90", "91", "92", "93", "94", "95", "96", "97", "98", "99", "9A", "9B", "9C", "9D", "9E", "9F", "A0", "A1", "A2", "A3", "A4", "A5", "A6", "A7", "A8", "A9", "AA", "AB", "AC", "AD", "AE", "AF", "B0", "B1", "B2", "B3", "B4", "B5", "B6", "B7", "B8", "B9", "BA", "BB", "BC", "BD", "BE", "BF", "C0", "C1", "C2", "C3", "C4", "C5", "C6", "C7", "C8", "C9", "CA", "CB", "CC", "CD", "CE", "CF", "D0", "D1", "D2", "D3", "D4", "D5", "D6", "D7", "D8", "D9", "DA", "DB", "DC", "DD", "DE", "DF", "E0", "E1", "E2", "E3", "E4", "E5", "E6", "E7", "E8", "E9", "EA", "EB", "EC", "ED", "EE", "EF", "F0", "F1", "F2", "F3", "F4", "F5", "F6", "F7", "F8", "F9", "FA", "FB", "FC", "FD", "FE", "FF" }; static const unsigned char lotus_magic_table[] = { 0xbd, 0x56, 0xea, 0xf2, 0xa2, 0xf1, 0xac, 0x2a, 0xb0, 0x93, 0xd1, 0x9c, 0x1b, 0x33, 0xfd, 0xd0, 0x30, 0x04, 0xb6, 0xdc, 0x7d, 0xdf, 0x32, 0x4b, 0xf7, 0xcb, 0x45, 0x9b, 0x31, 0xbb, 0x21, 0x5a, 0x41, 0x9f, 0xe1, 0xd9, 0x4a, 0x4d, 0x9e, 0xda, 0xa0, 0x68, 0x2c, 0xc3, 0x27, 0x5f, 0x80, 0x36, 0x3e, 0xee, 0xfb, 0x95, 0x1a, 0xfe, 0xce, 0xa8, 0x34, 0xa9, 0x13, 0xf0, 0xa6, 0x3f, 0xd8, 0x0c, 0x78, 0x24, 0xaf, 0x23, 0x52, 0xc1, 0x67, 0x17, 0xf5, 0x66, 0x90, 0xe7, 0xe8, 0x07, 0xb8, 0x60, 0x48, 0xe6, 0x1e, 0x53, 0xf3, 0x92, 0xa4, 0x72, 0x8c, 0x08, 0x15, 0x6e, 0x86, 0x00, 0x84, 0xfa, 0xf4, 0x7f, 0x8a, 0x42, 0x19, 0xf6, 0xdb, 0xcd, 0x14, 0x8d, 0x50, 0x12, 0xba, 0x3c, 0x06, 0x4e, 0xec, 0xb3, 0x35, 0x11, 0xa1, 0x88, 0x8e, 0x2b, 0x94, 0x99, 0xb7, 0x71, 0x74, 0xd3, 0xe4, 0xbf, 0x3a, 0xde, 0x96, 0x0e, 0xbc, 0x0a, 0xed, 0x77, 0xfc, 0x37, 0x6b, 0x03, 0x79, 0x89, 0x62, 0xc6, 0xd7, 0xc0, 0xd2, 0x7c, 0x6a, 0x8b, 0x22, 0xa3, 0x5b, 0x05, 0x5d, 0x02, 0x75, 0xd5, 0x61, 0xe3, 0x18, 0x8f, 0x55, 0x51, 0xad, 0x1f, 0x0b, 0x5e, 0x85, 0xe5, 0xc2, 0x57, 0x63, 0xca, 0x3d, 0x6c, 0xb4, 0xc5, 0xcc, 0x70, 0xb2, 0x91, 0x59, 0x0d, 0x47, 0x20, 0xc8, 0x4f, 0x58, 0xe0, 0x01, 0xe2, 0x16, 0x38, 0xc4, 0x6f, 0x3b, 0x0f, 0x65, 0x46, 0xbe, 0x7e, 0x2d, 0x7b, 0x82, 0xf9, 0x40, 0xb5, 0x1d, 0x73, 0xf8, 0xeb, 0x26, 0xc7, 0x87, 0x97, 0x25, 0x54, 0xb1, 0x28, 0xaa, 0x98, 0x9d, 0xa5, 0x64, 0x6d, 0x7a, 0xd4, 0x10, 0x81, 0x44, 0xef, 0x49, 0xd6, 0xae, 0x2e, 0xdd, 0x76, 0x5c, 0x2f, 0xa7, 0x1c, 0xc9, 0x09, 0x69, 0x9a, 0x83, 0xcf, 0x29, 0x39, 0xb9, 0xe9, 0x4c, 0xff, 0x43, 0xab, /* double power! 
*/ 0xbd, 0x56, 0xea, 0xf2, 0xa2, 0xf1, 0xac, 0x2a, 0xb0, 0x93, 0xd1, 0x9c, 0x1b, 0x33, 0xfd, 0xd0, 0x30, 0x04, 0xb6, 0xdc, 0x7d, 0xdf, 0x32, 0x4b, 0xf7, 0xcb, 0x45, 0x9b, 0x31, 0xbb, 0x21, 0x5a, 0x41, 0x9f, 0xe1, 0xd9, 0x4a, 0x4d, 0x9e, 0xda, 0xa0, 0x68, 0x2c, 0xc3, 0x27, 0x5f, 0x80, 0x36 }; static struct fmt_tests tests[] = { {"(HsjFebq0Kh9kH7aAZYc7kY30mC30mC3KmC30mCluagXrvWKj1)", "hashcat"}, {"(HosOQowHtnaYQqFo/XlScup0mC30mC3KmC30mCeACAxpjQN2u)", "pleaseletmein"}, {NULL} }; static void init(struct fmt_main *self) { #ifdef _OPENMP omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out)); crypt_out_real = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out_real)); digest34 = mem_calloc(self->params.max_keys_per_crypt, sizeof(*digest34)); keys_changed = salt_changed = 0; } static void done(void) { MEM_FREE(digest34); MEM_FREE(crypt_out_real); MEM_FREE(crypt_out); MEM_FREE(saved_key); } static struct { unsigned char salt[REAL_SALT_SIZE]; unsigned char iterations[10]; unsigned char chars[2]; unsigned char hash[BINARY_SIZE]; } cipher_binary_struct; static void mdtransform_norecalc_1(unsigned char state[16], unsigned char block[16]) { union { unsigned char c[48]; #ifdef DOMINOSEC_32BIT uint32_t u32[12]; #endif } x; unsigned char *p; unsigned int i, j, t; t = 0; p = x.c; for (j = 48; j > 32; j--) { t = state[p - x.c] ^ lotus_magic_table[j + t]; *p++ = t; } for (; j > 16; j--) { t = block[p - x.c - 16] ^ lotus_magic_table[j + t]; *p++ = t; } for (; j > 0; j--) { t = state[p - x.c - 32] ^ block[p - x.c - 32] ^ lotus_magic_table[j + t]; *p++ = t; } #ifndef DOMINOSEC_32BIT for (i = 0; i < 16; i++) { p = x.c; for (j = 48; j > 0; j--) { t = *p++ ^= lotus_magic_table[j-- + t]; t = *p++ ^= lotus_magic_table[j-- + t]; t = *p++ ^= lotus_magic_table[j-- + t]; t = *p++ ^= lotus_magic_table[j-- + t]; t = *p++ ^= lotus_magic_table[j-- + t]; t = *p++ ^= lotus_magic_table[j-- + t]; t = *p++ ^= lotus_magic_table[j-- + t]; t = *p++ ^= lotus_magic_table[j-- + t]; t = *p++ ^= lotus_magic_table[j-- + t]; t = *p++ ^= lotus_magic_table[j-- + t]; t = *p++ ^= lotus_magic_table[j-- + t]; t = *p++ ^= lotus_magic_table[j + t]; } } #else for (i = 0; i < 16; i++) { uint32_t *q = x.u32; p = x.c; for (j = 48; j > 0; j--) { uint32_t u = *q++; t = *p++ = u ^ lotus_magic_table[j-- + t]; t = *p++ = (u >> 8) ^ lotus_magic_table[j-- + t]; u >>= 16; t = *p++ = u ^ lotus_magic_table[j-- + t]; t = *p++ = (u >> 8) ^ lotus_magic_table[j + t]; } } #endif p = x.c; for (j = 48; j > 32; j--) { state[p - x.c] = t = *p ^ lotus_magic_table[j + t]; p++; } } static void mdtransform_1(unsigned char state[16], unsigned char checksum[16], unsigned char block[16]) { unsigned char c; unsigned int i, t; mdtransform_norecalc_1(state, block); t = checksum[15]; for (i = 0; i < 16; i++) { c = lotus_magic_table[block[i] ^ t]; t = checksum[i] ^= c; } } static void mdtransform_norecalc_3(unsigned char state[3][16], unsigned char block0[16], unsigned char block1[16], unsigned char block2[16]) { union { unsigned char c[48]; #ifdef DOMINOSEC_32BIT uint32_t u32[12]; #endif } x[3]; unsigned char *p0, *p1, *p2; unsigned int i, j, t0, t1, t2; t0 = t1 = t2 = 0; p0 = x[0].c; p1 = x[1].c; p2 = x[2].c; for (j = 48; j > 32; j--) { t0 = state[0][p0 - x[0].c] ^ lotus_magic_table[j + t0]; t1 = state[1][p1 - x[1].c] ^ 
lotus_magic_table[j + t1]; t2 = state[2][p2 - x[2].c] ^ lotus_magic_table[j + t2]; *p0++ = t0; *p1++ = t1; *p2++ = t2; } for (; j > 16; j--) { t0 = block0[p0 - x[0].c - 16] ^ lotus_magic_table[j + t0]; t1 = block1[p1 - x[1].c - 16] ^ lotus_magic_table[j + t1]; t2 = block2[p2 - x[2].c - 16] ^ lotus_magic_table[j + t2]; *p0++ = t0; *p1++ = t1; *p2++ = t2; } for (; j > 0; j--) { t0 = state[0][p0 - x[0].c - 32] ^ block0[p0 - x[0].c - 32] ^ lotus_magic_table[j + t0]; t1 = state[1][p1 - x[1].c - 32] ^ block1[p1 - x[1].c - 32] ^ lotus_magic_table[j + t1]; t2 = state[2][p2 - x[2].c - 32] ^ block2[p2 - x[2].c - 32] ^ lotus_magic_table[j + t2]; *p0++ = t0; *p1++ = t1; *p2++ = t2; } #ifndef DOMINOSEC_32BIT for (i = 0; i < 16; i++) { p0 = x[0].c; p1 = x[1].c; p2 = x[2].c; for (j = 48; j > 0; j--) { t0 = *p0++ ^= lotus_magic_table[j + t0]; t1 = *p1++ ^= lotus_magic_table[j + t1]; t2 = *p2++ ^= lotus_magic_table[j-- + t2]; t0 = *p0++ ^= lotus_magic_table[j + t0]; t1 = *p1++ ^= lotus_magic_table[j + t1]; t2 = *p2++ ^= lotus_magic_table[j-- + t2]; t0 = *p0++ ^= lotus_magic_table[j + t0]; t1 = *p1++ ^= lotus_magic_table[j + t1]; t2 = *p2++ ^= lotus_magic_table[j-- + t2]; t0 = *p0++ ^= lotus_magic_table[j + t0]; t1 = *p1++ ^= lotus_magic_table[j + t1]; t2 = *p2++ ^= lotus_magic_table[j + t2]; } } #else for (i = 0; i < 16; i++) { uint32_t *q0 = x[0].u32; uint32_t *q1 = x[1].u32; uint32_t *q2 = x[2].u32; p0 = x[0].c; p1 = x[1].c; p2 = x[2].c; for (j = 48; j > 0; j--) { uint32_t u0 = *q0++; uint32_t u1 = *q1++; uint32_t u2 = *q2++; t0 = *p0++ = u0 ^ lotus_magic_table[j + t0]; t1 = *p1++ = u1 ^ lotus_magic_table[j + t1]; t2 = *p2++ = u2 ^ lotus_magic_table[j-- + t2]; t0 = *p0++ = (u0 >> 8) ^ lotus_magic_table[j + t0]; t1 = *p1++ = (u1 >> 8) ^ lotus_magic_table[j + t1]; t2 = *p2++ = (u2 >> 8) ^ lotus_magic_table[j-- + t2]; u0 >>= 16; u1 >>= 16; u2 >>= 16; t0 = *p0++ = u0 ^ lotus_magic_table[j + t0]; t1 = *p1++ = u1 ^ lotus_magic_table[j + t1]; t2 = *p2++ = u2 ^ lotus_magic_table[j-- + t2]; t0 = *p0++ = (u0 >> 8) ^ lotus_magic_table[j + t0]; t1 = *p1++ = (u1 >> 8) ^ lotus_magic_table[j + t1]; t2 = *p2++ = (u2 >> 8) ^ lotus_magic_table[j + t2]; } } #endif p0 = x[0].c; p1 = x[1].c; p2 = x[2].c; for (j = 48; j > 32; j--) { state[0][p0 - x[0].c] = t0 = *p0 ^ lotus_magic_table[j + t0]; state[1][p1 - x[1].c] = t1 = *p1 ^ lotus_magic_table[j + t1]; state[2][p2 - x[2].c] = t2 = *p2 ^ lotus_magic_table[j + t2]; p0++; p1++; p2++; } } static void mdtransform_3(unsigned char state[3][16], unsigned char checksum[3][16], unsigned char block0[16], unsigned char block1[16], unsigned char block2[16]) { unsigned int i, t0, t1, t2; mdtransform_norecalc_3(state, block0, block1, block2); t0 = checksum[0][15]; t1 = checksum[1][15]; t2 = checksum[2][15]; for (i = 0; i < 16; i++) { t0 = checksum[0][i] ^= lotus_magic_table[block0[i] ^ t0]; t1 = checksum[1][i] ^= lotus_magic_table[block1[i] ^ t1]; t2 = checksum[2][i] ^= lotus_magic_table[block2[i] ^ t2]; } } static void domino_big_md_3(unsigned char *in0, unsigned int size0, unsigned char *in1, unsigned int size1, unsigned char *in2, unsigned int size2, unsigned char *out0, unsigned char *out1, unsigned char *out2) { unsigned char state[3][16] = {{0}, {0}, {0}}; unsigned char checksum[3][16] = {{0}, {0}, {0}}; unsigned char block[3][16]; unsigned int min, curpos = 0, curpos0, curpos1, curpos2; min = (size0 < size1) ? 
size0 : size1; if (size2 < min) min = size2; while (curpos + 15 < min) { mdtransform_3(state, checksum, in0 + curpos, in1 + curpos, in2 + curpos); curpos += 16; } curpos0 = curpos; while (curpos0 + 15 < size0) { mdtransform_1(state[0], checksum[0], in0 + curpos0); curpos0 += 16; } curpos1 = curpos; while (curpos1 + 15 < size1) { mdtransform_1(state[1], checksum[1], in1 + curpos1); curpos1 += 16; } curpos2 = curpos; while (curpos2 + 15 < size2) { mdtransform_1(state[2], checksum[2], in2 + curpos2); curpos2 += 16; } { unsigned int pad0 = size0 - curpos0; unsigned int pad1 = size1 - curpos1; unsigned int pad2 = size2 - curpos2; memcpy(block[0], in0 + curpos0, pad0); memcpy(block[1], in1 + curpos1, pad1); memcpy(block[2], in2 + curpos2, pad2); memset(block[0] + pad0, 16 - pad0, 16 - pad0); memset(block[1] + pad1, 16 - pad1, 16 - pad1); memset(block[2] + pad2, 16 - pad2, 16 - pad2); mdtransform_3(state, checksum, block[0], block[1], block[2]); } mdtransform_norecalc_3(state, checksum[0], checksum[1], checksum[2]); memcpy(out0, state[0], 16); memcpy(out1, state[1], 16); memcpy(out2, state[2], 16); } static void domino_big_md_3_34(unsigned char *in0, unsigned char *in1, unsigned char *in2, unsigned char *out0, unsigned char *out1, unsigned char *out2) { unsigned char state[3][16] = {{0}, {0}, {0}}; unsigned char checksum[3][16] = {{0}, {0}, {0}}; unsigned char block[3][16]; mdtransform_3(state, checksum, in0, in1, in2); mdtransform_3(state, checksum, in0 + 16, in1 + 16, in2 + 16); memcpy(block[0], in0 + 32, 2); memcpy(block[1], in1 + 32, 2); memcpy(block[2], in2 + 32, 2); memset(block[0] + 2, 14, 14); memset(block[1] + 2, 14, 14); memset(block[2] + 2, 14, 14); mdtransform_3(state, checksum, block[0], block[1], block[2]); mdtransform_norecalc_3(state, checksum[0], checksum[1], checksum[2]); memcpy(out0, state[0], 16); memcpy(out1, state[1], 16); memcpy(out2, state[2], 16); } static int valid(char *ciphertext, struct fmt_main *self) { unsigned int i; unsigned char ch; if (strlen(ciphertext) != CIPHERTEXT_LENGTH) return 0; if (ciphertext[0] != '(' || ciphertext[1] != 'H' || ciphertext[CIPHERTEXT_LENGTH-1] != ')') return 0; for (i = 1; i < CIPHERTEXT_LENGTH-1; ++i) { ch = ciphertext[i]; if (!isalnum(ch) && ch != '+' && ch != '/') return 0; } return 1; } // "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz+/" variant static void decode(unsigned char *ascii_cipher, unsigned char *binary) { unsigned int out = 0, apsik = 0, loop; unsigned int i; unsigned char ch; unsigned char buffer[128] = {0}; ascii_cipher += 2; i = 0; do { if (apsik < 8) { /* should be using proper_mul, but what the heck... 
it's nearly the same :] */ loop = 2; /* ~ loop = proper_mul(13 - apsik); */ apsik += loop*6; do { out <<= 6; ch = *ascii_cipher; if (ch < '0' || ch > '9') if (ch < 'A' || ch > 'Z') if (ch < 'a' || ch > 'z') if (ch != '+') if (ch == '/') out += '?'; else ; /* shit happens */ else out += '>'; else out += ch-'='; else out += ch-'7'; else out += ch-'0'; ++ascii_cipher; } while (--loop); } loop = apsik-8; ch = out >> loop; *(buffer+i) = ch; ch <<= loop; apsik = loop; out -= ch; } while (++i < 36); buffer[3] += -4; /* salt patching */ memcpy(binary, buffer, 36); } static void *get_binary(char *ciphertext) { static ARCH_WORD_32 out[BINARY_SIZE / sizeof(ARCH_WORD_32) + 1]; decode((unsigned char*)ciphertext, (unsigned char*)&cipher_binary_struct); memcpy(out, cipher_binary_struct.hash, BINARY_SIZE); return (void*)out; } static void *get_salt(char *ciphertext) { static struct custom_salt cs; unsigned char buffer[11] = {0}; memset(&cs, 0, sizeof(struct custom_salt)); decode((unsigned char*)ciphertext, (unsigned char*)&cipher_binary_struct); memcpy(cs.salt, cipher_binary_struct.salt, REAL_SALT_SIZE); memcpy(cs.chars, cipher_binary_struct.chars, 2); memcpy(buffer, cipher_binary_struct.iterations, 10); cs.iterations = strtoul((char*)buffer, NULL, 10); return (void*)&cs; } static void set_salt(void *salt) { cur_salt = (struct custom_salt*)salt; salt_changed = 1; } static void set_key(char *key, int index) { strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH + 1); keys_changed = 1; } static char *get_key(int index) { return saved_key[index]; } static void domino_base64_encode(uint32_t v, int n, unsigned char *out) { unsigned char itoa64[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz+/"; while ((n - 1) >= 0) { n = n - 1; out[n] = itoa64[v & 0x3f]; v = v >> 6; } } static void domino_encode(unsigned char *salt, unsigned char *hash) { unsigned char output[25] = {0}; int byte10 = (char)salt[3] + 4; if (byte10 > 255) byte10 = byte10 - 256; salt[3] = (char)(byte10); domino_base64_encode((salt[ 0] << 16) | (salt[ 1] << 8) | salt[ 2], 4, output); domino_base64_encode((salt[ 3] << 16) | (salt[ 4] << 8) | salt[ 5], 4, output+4); domino_base64_encode((salt[ 6] << 16) | (salt[ 7] << 8) | salt[ 8], 4, output+8); domino_base64_encode((salt[ 9] << 16) | (salt[10] << 8) | salt[11], 4, output+12); domino_base64_encode((salt[12] << 16) | (salt[13] << 8) | salt[14], 4, output+16); // if (defined ($char)) // substr ($passwd, 18, 1) = $char; output[19] = '\x00'; memcpy(hash, output, 20); } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index += 3) { int i, j; // domino 5 hash - SEC_pwddigest_V1 - -m 8600 if (keys_changed) { char *k0 = saved_key[index]; char *k1 = saved_key[index + 1]; char *k2 = saved_key[index + 2]; unsigned char digest16[3][16]; domino_big_md_3((unsigned char *)k0, strlen(k0), (unsigned char *)k1, strlen(k1), (unsigned char *)k2, strlen(k2), digest16[0], digest16[1], digest16[2]); // Not (++i < 16) ! // Domino will do hash of first 34 bytes ignoring The Fact that now // there is a salt at a beginning of buffer. This means that last 5 // bytes "EEFF)" of password digest are meaningless. 
for (i = 0, j = 6; i < 14; i++, j += 2) { const char *hex2 = hex_table[ARCH_INDEX(digest16[0][i])]; digest34[index][j] = hex2[0]; digest34[index][j + 1] = hex2[1]; hex2 = hex_table[ARCH_INDEX(digest16[1][i])]; digest34[index + 1][j] = hex2[0]; digest34[index + 1][j + 1] = hex2[1]; hex2 = hex_table[ARCH_INDEX(digest16[2][i])]; digest34[index + 2][j] = hex2[0]; digest34[index + 2][j + 1] = hex2[1]; } } // domino 6 hash - SEC_pwddigest_V2 - -m 8700 if (salt_changed) { digest34[index + 2][0] = digest34[index + 1][0] = digest34[index][0] = cur_salt->salt[0]; digest34[index + 2][1] = digest34[index + 1][1] = digest34[index][1] = cur_salt->salt[1]; digest34[index + 2][2] = digest34[index + 1][2] = digest34[index][2] = cur_salt->salt[2]; digest34[index + 2][3] = digest34[index + 1][3] = digest34[index][3] = cur_salt->salt[3]; digest34[index + 2][4] = digest34[index + 1][4] = digest34[index][4] = cur_salt->salt[4]; digest34[index + 2][5] = digest34[index + 1][5] = digest34[index][5] = '('; } domino_big_md_3_34(digest34[index], digest34[index + 1], digest34[index + 2], (unsigned char *)crypt_out[index], (unsigned char *)crypt_out[index + 1], (unsigned char *)crypt_out[index + 2]); for (i= 0; i < 3; i++) { // domino 8(.5.x) hash - SEC_pwddigest_V3 - -m 9100 unsigned char buffer[22 + 1] = {0}; unsigned char tmp_hash[22 + 1] = {0}; memcpy(tmp_hash, cur_salt->salt, 5); memcpy(tmp_hash + 5, crypt_out[index + i], 16); domino_encode(tmp_hash, buffer); sprintf((char*)tmp_hash, "(G%s)", buffer); pbkdf2_sha1(tmp_hash, 22, cur_salt->salt, 16, cur_salt->iterations, (unsigned char *)crypt_out_real[index+i], 8, 0); } } keys_changed = salt_changed = 0; return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out_real[index], BINARY_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out_real[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static int get_hash_0(int index) { return *(ARCH_WORD_32*)&crypt_out_real[index] & PH_MASK_0; } static int get_hash_1(int index) { return *(ARCH_WORD_32*)&crypt_out_real[index] & PH_MASK_1; } static int get_hash_2(int index) { return *(ARCH_WORD_32*)&crypt_out_real[index] & PH_MASK_2; } static int get_hash_3(int index) { return *(ARCH_WORD_32*)&crypt_out_real[index] & PH_MASK_3; } static int get_hash_4(int index) { return *(ARCH_WORD_32*)&crypt_out_real[index] & PH_MASK_4; } static int get_hash_5(int index) { return *(ARCH_WORD_32*)&crypt_out_real[index] & PH_MASK_5; } static int get_hash_6(int index) { return *(ARCH_WORD_32*)&crypt_out_real[index] & PH_MASK_6; } static int salt_hash(void *salt) { //printf("salt %08x hash %03x\n", *(ARCH_WORD_32*)salt, *(ARCH_WORD_32*)salt & (SALT_HASH_SIZE - 1)); return *(ARCH_WORD_32*)salt & (SALT_HASH_SIZE - 1); } struct fmt_main fmt_DOMINOSEC8 = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { NULL }, { NULL }, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, NULL, set_salt, set_key, get_key, 
fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
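
/*
 * Standalone sketch (not part of the plugin, and never compiled) of the
 * custom base64 step used by domino_base64_encode()/domino_encode() above:
 * each 3-byte group is packed big-endian into 24 bits and emitted as 4
 * characters of the "0-9A-Za-z+/" alphabet, most significant 6 bits first.
 * The input bytes below are made up purely for illustration.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static void demo_domino_b64(uint32_t v, int n, unsigned char *out)
{
	const unsigned char itoa64[] =
	    "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz+/";

	while (n-- > 0) {          /* fill from the least significant end */
		out[n] = itoa64[v & 0x3f];
		v >>= 6;
	}
}

int main(void)
{
	unsigned char out[5] = {0};

	/* bytes 0x01 0x02 0x03 packed exactly as domino_encode() packs them */
	demo_domino_b64((0x01 << 16) | (0x02 << 8) | 0x03, 4, out);
	printf("%s\n", out);       /* prints "0G83" with this alphabet */
	return 0;
}
#endif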
owl_ndarray_conv_impl.h
/* * OWL - OCaml Scientific and Engineering Computing * Copyright (c) 2016-2018 Liang Wang <liang.wang@cl.cam.ac.uk> */ #ifndef OWL_CORE_CONV_IMPL #define OWL_CORE_CONV_IMPL /* * Calculate the cache sizes and block sizes for convolution operations. * Code heavily inspired by Eigen (http://eigen.tuxfamily.org/). */ #define IM2COL_THRESHOLD 512 * 1024 #define ALIGN_SIZE 32 // for AVX address alignment OWL_INLINE void query_cache_sizes_intel(int* l1p, int* l2p, int* l3p) { int cpuinfo[4]; int l1 = 0, l2 = 0, l3 = 0; int cache_id = 0; int cache_type = 0; do { cpuinfo[0] = cpuinfo[1] = cpuinfo[2] = cpuinfo[3] = 0; CPUID(cpuinfo, 0x4, cache_id); cache_type = (cpuinfo[0] & 0x0F) >> 0; if(cache_type == 1 || cache_type == 3) { int cache_level = (cpuinfo[0] & 0xE0) >> 5; int ways = (cpuinfo[1] & 0xFFC00000) >> 22; int partitions = (cpuinfo[1] & 0x003FF000) >> 12; int line_size = (cpuinfo[1] & 0x00000FFF) >> 0; int sets = (cpuinfo[2]); int cache_size = (ways + 1) * (partitions + 1) * (line_size + 1) * (sets + 1); switch(cache_level) { case 1: l1 = cache_size; break; case 2: l2 = cache_size; break; case 3: l3 = cache_size; break; default: break; } } cache_id++; } while(cache_type > 0 && cache_id < 16); *l1p = l1; *l2p = l2; *l3p = l3; return; } OWL_INLINE void query_cache_sizes(int* l1p, int* l2p, int* l3p) { if (OWL_ARCH_i386 || OWL_ARCH_x86_64) { int cpuinfo[4]; CPUID(cpuinfo, 0x0, 0); int highest_func = cpuinfo[1]; if (highest_func >= 4) query_cache_sizes_intel(l1p, l2p, l3p); else { *l1p = 32 * 1024; *l2p = 256 * 1024; *l3p = 2048 * 1024; } } else { *l1p = 16 * 1024; *l2p = 512 * 1024; *l3p = 512 * 1024; } } // The effect of calculating block size according to cache sizes is yet to be // proved here since we use OpenBLAS GEMM directly; also, note that we // calculate `InputMatrix x KernelMatrix`, not the other way around. void compute_block_sizes(int* kp, int* mp, int* np, int typesize) { int l1, l2, l3; query_cache_sizes(&l1, &l2, &l3); // set the cache sizes to small numbers when debugging int k = *kp; int m = *mp; int n = *np; if (fmaxf(k, fmaxf(m, n)) < 50) { return; } int nr = 4; int num_reg = 16; int mr = num_reg / (2 * nr) * typesize; int k_strip = 8; int k_div = (mr + nr) * typesize; int k_sub = mr * nr * typesize; const int max_kc = fmaxf(((l1 - k_sub) / k_div) & (~(k_strip - 1)), 1); const int old_k = k; if (k > max_kc) { k = (k % max_kc) == 0 ? max_kc : max_kc - k_strip * ((max_kc - 1 - (k % max_kc)) / (k_strip * (k / max_kc + 1))); //assert (old_k / k == old_k / max_kc); } int max_nc; const int actual_l2 = 1572864; // l3 for debug; otherwise 1572864 const int lhs_bytes = m * k * typesize; const int rest_l1 = l1 - k_sub - lhs_bytes; if (rest_l1 >= nr * k * typesize) { max_nc = rest_l1 / (k * typesize); } else { max_nc = (3 * actual_l2) / (4 * max_kc * typesize); } int nc = (int) (fminf(actual_l2 / (2 * k * typesize), max_nc)) & (~(nr - 1)); if (n > nc) { n = (n % nc == 0) ? nc : (nc - nr * ((nc - (n % nc)) / (nr * (n / nc + 1)))); } else if (old_k == k) { int kn_size = k * n * typesize; int actual_lm = actual_l2; int max_mc = m; if (kn_size < 1024) { actual_lm = l1; } else if (l3 != 0 && kn_size <= 32768) { actual_lm = l2; max_mc = fminf(576, max_mc); } int mc = fminf(actual_lm / (3 * k * typesize), max_mc); if (mc > mr) { mc -= mc % mr; } else if (mc == 0) { *kp = k; *mp = m; *np = n; return; } m = (m % mc == 0) ? 
mc : (mc - mr * ((mc - (m % mc)) / (mr * (m / mc + 1)))); } *kp = k; *mp = m; *np = n; return; } #endif /* OWL_CORE_CONV_IMPL */ #ifdef OWL_ENABLE_TEMPLATE #ifdef AVX_PSIZE /* * Fill in temporary input matrix from input tensor with vectorisation. * Currently only support AVX instruciton set. */ void ACX_FUN_LOAD (load_sub_matrix_fast, spatial) ( TYPE* input_ptr, TYPE* output_ptr, int* cmk_ptr, int kc_strip, int k, int kernel_ri, int input_ri, int in_channel, int idx_base, int cstart, int rstart, int input_cols, int input_rows, short reverse_mode ) { // assume output_ptr is aligned; if in_channel % AVX_PSIZE == 0, the input // matrix can always be loaded consecutively by a step of AVX_PSIZE for (int ik = 0; ik < kc_strip; ik += AVX_PSIZE) { int kc = (k + ik) / kernel_ri; int kri = (k + ik) - kc * kernel_ri; int kr = kri / in_channel; int ki = kri - kr * in_channel; int input_col = kc + cstart; int input_row = kr + rstart; if (input_col < input_cols && input_col >= 0 && input_row < input_rows && input_row >= 0) { int input_index = idx_base + input_col * input_ri + input_row * in_channel + ki; if (reverse_mode == 0) { AVX_TYPE v = AVX_LOADU(input_ptr + input_index); AVX_STOREA(output_ptr + (*cmk_ptr), v); } else { AVX_TYPE v1 = AVX_LOADA(output_ptr + (*cmk_ptr)); AVX_TYPE v2 = AVX_LOADU(input_ptr + input_index); AVX_TYPE v = AVX_ADD(v1, v2); AVX_STOREU(input_ptr + input_index, v); } } *cmk_ptr += AVX_PSIZE; } return; } void ACX_FUN_LOAD (load_sub_matrix, spatial) ( TYPE* input_ptr, TYPE* output_ptr, int* cmk_ptr, int kc_strip, int actual_kc, int k, int kernel_ri, int input_ri, int in_channel, int idx_base, int cstart, int rstart, int input_cols, int input_rows, int kernel_rows, short reverse_mode ){ int ik = 0; // first, load `kc_strip` numbers with a step of AVX_PSIZE; // assume `kc_strip % AVX_PSIZE == 0` for ( ; ik < kc_strip; ik += AVX_PSIZE) { const int cr_set[2] = {(k + ik) / in_channel, (k + ik + AVX_PSIZE - 1) / in_channel}; const int c_set[2] = {cr_set[0] / kernel_rows, cr_set[1] / kernel_rows}; const int cols[2] = {cstart + c_set[0], cstart + c_set[1]}; // out of bounds; set the next AVX_PSIZE numbers to 0 if (cols[0] >= input_cols || cols[1] < 0) { *cmk_ptr += AVX_PSIZE; continue; } else if (cols[0] == cols[1]) { const int r_set[2] = {cr_set[0] - c_set[0] * kernel_rows, cr_set[1] - c_set[1] * kernel_rows}; const int rows[2] = {rstart + r_set[0], rstart + r_set[1]}; // out of bounds; set the next AVX_PSIZE numbers to 0 if (rows[0] >= input_rows || rows[1] < 0) { *cmk_ptr += AVX_PSIZE; continue; } // next AVX_PSIZE numbers can be loaded consecutively else if (rows[0] >= 0 && rows[1] < input_rows) { int ki = k + ik - cr_set[0] * in_channel; int input_index = idx_base + cols[0] * input_ri + rows[0] * in_channel + ki; if (reverse_mode == 0) { AVX_TYPE v = AVX_LOADU(input_ptr + input_index); AVX_STOREU(output_ptr + (*cmk_ptr), v); } else { AVX_TYPE v1 = AVX_LOADU(output_ptr + (*cmk_ptr)); AVX_TYPE v2 = AVX_LOADU(input_ptr + input_index); AVX_TYPE v = AVX_ADD(v1, v2); AVX_STOREU(input_ptr + input_index, v); } *cmk_ptr += AVX_PSIZE; continue; } } // previous special cases do not apply; calculate input index one by one for (int ip = 0; ip < AVX_PSIZE; ip++) { int kc = (k + ik + ip) / kernel_ri; int kri = (k + ik + ip) - kc * kernel_ri; int kr = kri / in_channel; int ki = kri - kr * in_channel; int input_col = kc + cstart; int input_row = kr + rstart; if (input_col < input_cols && input_col >= 0 && input_row < input_rows && input_row >= 0) { int input_index = idx_base + input_col * input_ri 
+ input_row * in_channel + ki; if (reverse_mode == 0) output_ptr[*cmk_ptr] = input_ptr[input_index]; else input_ptr[input_index] += output_ptr[*cmk_ptr]; } *cmk_ptr += 1; } } // second, load the rest `actual_kc - kc_strip` numbers for (; ik < actual_kc; ik++) { int kc = (k + ik) / kernel_ri; int kri = (k + ik) - kc * kernel_ri; int kr = kri / in_channel; int ki = kri - kr * in_channel; int input_col = kc + cstart; int input_row = kr + rstart; if (input_col < input_cols && input_col >= 0 && input_row < input_rows && input_row >= 0) { int input_index = idx_base + input_col * input_ri + input_row * in_channel + ki; if (reverse_mode == 0) output_ptr[*cmk_ptr] = input_ptr[input_index]; else input_ptr[input_index] += output_ptr[*cmk_ptr]; } *cmk_ptr += 1; } return; } #endif /* AVX_PSIZE */ /* * GEBP-based implementation. See Goto et.al [08] for detail. */ CAMLprim value FUN_NATIVE (spatial) ( value vInput_ptr, value vKernel_ptr, value vOutput_ptr, value vBatches, value vInput_cols, value vInput_rows, value vIn_channel, value vKernel_cols, value vKernel_rows, value vOutput_cols, value vOutput_rows, value vOut_channel, value vRow_stride, value vCol_stride, value vPadding, value vRow_in_stride, value vCol_in_stride ) { struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr); struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr); struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr); TYPE *input_ptr = (TYPE *) IN->data; TYPE *kernel_ptr = (TYPE *) KE->data; TYPE *output_ptr = (TYPE *) OU->data; int batches = Long_val(vBatches); int input_cols = Long_val(vInput_cols); int input_rows = Long_val(vInput_rows); int in_channel = Long_val(vIn_channel); int kernel_cols = Long_val(vKernel_cols); int kernel_rows = Long_val(vKernel_rows); int output_cols = Long_val(vOutput_cols); int output_rows = Long_val(vOutput_rows); int out_channel = Long_val(vOut_channel); int row_stride = Long_val(vRow_stride); int col_stride = Long_val(vCol_stride); int padding = Long_val(vPadding); int row_in_stride = Long_val(vRow_in_stride); int col_in_stride = Long_val(vCol_in_stride); const int input_cri = in_channel * input_rows * input_cols; const int input_ri = in_channel * input_rows; const int output_cri = out_channel * output_rows * output_cols; const int output_cr = output_rows * output_cols; const int output_crb = output_rows * output_cols * batches; const int kernel_cri = kernel_cols * kernel_rows * in_channel; const int kernel_cr = kernel_cols * kernel_rows; const int kernel_ri = kernel_rows * in_channel; memset(output_ptr, 0, batches * output_cri * sizeof(TYPE)); INIT; int pr = 0, pc = 0; if (padding != 1) { pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2; pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2; if (pr < 0) pr = 0; if (pc < 0) pc = 0; } // if generated input matrix is small enough, use im2col implementation if (kernel_cri * output_crb < IM2COL_THRESHOLD) { TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE)); if (inpt2d == NULL) exit(1); for (int i = 0; i < output_crb; ++i) { int bt = i / output_cr; int cr = i % output_cr; int c = cr / output_rows; int r = cr % output_rows; const int cstart = c * col_stride - pc; const int rstart = r * row_stride - pr; const int cend = cstart + kernel_cols; const int rend = rstart + kernel_rows; const int input_idx_base = bt * input_cri; int cnt = 0; for (int a = cstart; a < cend; ++a) { for (int b = rstart; b < rend; ++b) { for (int h = 0; h < in_channel; ++h) { if (a < input_cols && a >= 0 && b < input_rows 
&& b >= 0) { int input_idx = input_idx_base + a * input_ri + b * in_channel + h; inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx]; } ++cnt; } } } } GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans, output_crb, out_channel, kernel_cri, ALPHA, inpt2d, kernel_cri, kernel_ptr, out_channel, BETA, output_ptr, out_channel); free(inpt2d); return Val_unit; } int mc = output_crb; int kc = kernel_cri; int nc = out_channel; compute_block_sizes(&kc, &nc, &mc, sizeof(TYPE)); #ifdef AVX_PSIZE int fast_flag = (in_channel % AVX_PSIZE == 0); TYPE *temp_mk = NULL; if (posix_memalign((void**) &temp_mk, ALIGN_SIZE, mc * kc * sizeof(TYPE))) exit(1); #else TYPE *temp_mk = (TYPE *) calloc(mc * kc, sizeof(TYPE)); if (temp_mk == NULL) exit(1); #endif TYPE *temp_kn = (TYPE *) calloc(nc * kc, sizeof(TYPE)); if (temp_kn == NULL) exit(1); TYPE *temp_mn = (TYPE *) calloc(mc * nc, sizeof(TYPE)); if (temp_mn == NULL) exit(1); for (int m = 0; m < output_crb; m += mc) { int actual_mc = fminf(m + mc, output_crb) - m; for (int k = 0; k < kernel_cri; k += kc) { memset(temp_mk, 0, mc * kc * sizeof(TYPE)); int actual_kc = fminf(k + kc, kernel_cri) - k; #ifdef AVX_PSIZE int kc_strip = (actual_kc / AVX_PSIZE) * AVX_PSIZE; #endif // iterate along each row of the generated input matrix; processing four // rows in parallel with the help of e.g. OpenMP should be possible int cmk = 0; for (int im = 0; im < actual_mc; im += 1) { int b = (m + im) / output_cr; int cr = (m + im) - b * output_cr; int c = cr / output_rows; int r = cr - c * output_rows; const int cstart = c * col_stride - pc; const int rstart = r * row_stride - pr; const int idx_base = b * input_cri; // fill in the sub input matrix #ifdef AVX_PSIZE if (fast_flag) { ACX_FUN_LOAD (load_sub_matrix_fast, spatial) ( input_ptr, temp_mk, &cmk, kc_strip, k, kernel_ri, input_ri, in_channel, idx_base, cstart, rstart, input_cols, input_rows, 0); } else { ACX_FUN_LOAD (load_sub_matrix, spatial) ( input_ptr, temp_mk, &cmk, kc_strip, actual_kc, k, kernel_ri, input_ri, in_channel, idx_base, cstart, rstart, input_cols, input_rows, kernel_rows, 0); } #else for (int ik = 0; ik < actual_kc; ik += 1) { int kc = (k + ik) / kernel_ri; int kri = (k + ik) - kc * kernel_ri; int kr = kri / in_channel; int ki = kri - kr * in_channel; int input_col = kc + cstart; int input_row = kr + rstart; if (input_col < input_cols && input_col >= 0 && input_row < input_rows && input_row >= 0) { int input_index = idx_base + input_col * input_ri + input_row * in_channel + ki; temp_mk[cmk] = input_ptr[input_index]; } cmk++; } #endif } int idx_kn_base = k * out_channel; for (int n = 0; n < out_channel; n += nc) { int actual_nc = fminf(n + nc, out_channel) - n; idx_kn_base += n; // fill in the kernel matrix int cnk = 0; for (int ik = 0; ik < actual_kc; ik++) { for (int jn = 0; jn < actual_nc; jn++) { int index_kn = idx_kn_base + ik * out_channel + jn; temp_kn[cnk++] = kernel_ptr[index_kn]; } } GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans, actual_mc, actual_nc, actual_kc, ALPHA, temp_mk, actual_kc, temp_kn, actual_nc, BETA, temp_mn, actual_nc); int cmn = 0; for (int ix = 0; ix < actual_mc; ix++) { for (int iy = 0; iy < actual_nc; iy++) { int index_mn = (ix + m) * out_channel + (iy + n); output_ptr[index_mn] += temp_mn[cmn++]; } } } } } free(temp_mk); free(temp_kn); free(temp_mn); return Val_unit; } CAMLprim value FUN_BYTE (spatial) (value * argv, int argn) { return FUN_NATIVE (spatial) ( argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], 
argv[14], argv[15], argv[16] ); } CAMLprim value FUN_NATIVE (spatial_backward_input) ( value vInput_ptr, value vKernel_ptr, value vOutput_ptr, value vBatches, value vInput_cols, value vInput_rows, value vIn_channel, value vKernel_cols, value vKernel_rows, value vOutput_cols, value vOutput_rows, value vOut_channel, value vRow_stride, value vCol_stride, value vRow_in_stride, value vCol_in_stride ) { struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr); struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr); struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr); TYPE *input_ptr = (TYPE *) IN->data; TYPE *kernel_ptr = (TYPE *) KE->data; TYPE *output_ptr = (TYPE *) OU->data; int batches = Long_val(vBatches); int input_cols = Long_val(vInput_cols); int input_rows = Long_val(vInput_rows); int in_channel = Long_val(vIn_channel); int kernel_cols = Long_val(vKernel_cols); int kernel_rows = Long_val(vKernel_rows); int output_cols = Long_val(vOutput_cols); int output_rows = Long_val(vOutput_rows); int out_channel = Long_val(vOut_channel); int row_stride = Long_val(vRow_stride); int col_stride = Long_val(vCol_stride); int row_in_stride = Long_val(vRow_in_stride); int col_in_stride = Long_val(vCol_in_stride); const int input_cri = in_channel * input_rows * input_cols; const int input_ri = in_channel * input_rows; const int output_ri = out_channel * output_rows; const int output_cr = output_rows * output_cols; const int output_crb = output_rows * output_cols * batches; const int kernel_cri = kernel_cols * kernel_rows * in_channel; const int kernel_ri = kernel_rows * in_channel; int pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2; int pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2; if (pr < 0) pr = 0; if (pc < 0) pc = 0; memset(input_ptr, 0, batches * input_cri * sizeof(TYPE)); INIT; if (kernel_cri * output_crb < IM2COL_THRESHOLD) { TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE)); if (inpt2d == NULL) exit(1); GEMM(CblasRowMajor, CblasNoTrans, CblasTrans, output_crb, kernel_cri, out_channel, ALPHA, output_ptr, out_channel, kernel_ptr, out_channel, BETA, inpt2d, kernel_cri); for (int i = 0; i < output_crb; ++i) { int bt = i / output_cr; int cr = i % output_cr; int c = cr / output_rows; int r = cr % output_rows; const int cstart = c * col_stride - pc; const int rstart = r * row_stride - pr; const int cend = cstart + kernel_cols; const int rend = rstart + kernel_rows; const int input_idx_base = bt * input_cri; int cnt = 0; for (int a = cstart; a < cend; ++a) { for (int b = rstart; b < rend; ++b) { for (int h = 0; h < in_channel; ++h) { if (a < input_cols && a >= 0 && b < input_rows && b >= 0) { int input_idx = input_idx_base + a * input_ri + b * in_channel + h; input_ptr[input_idx] += inpt2d[i * kernel_cri + cnt]; } ++cnt; } } } } free(inpt2d); return Val_unit; } int mc = output_crb; int kc = kernel_cri; int nc = out_channel; compute_block_sizes(&mc, &kc, &nc, sizeof(TYPE)); #ifdef AVX_PSIZE int fast_flag = (in_channel % AVX_PSIZE == 0); TYPE *temp_mk = NULL; if (posix_memalign((void**) &temp_mk, ALIGN_SIZE, mc * kc * sizeof(TYPE))) exit(1); #else TYPE *temp_mk = (TYPE *) calloc(mc * kc, sizeof(TYPE)); if (temp_mk == NULL) exit(1); #endif TYPE *temp_kn = (TYPE *) calloc(nc * kc, sizeof(TYPE)); if (temp_kn == NULL) exit(1); TYPE *temp_mn = (TYPE *) calloc(mc * nc, sizeof(TYPE)); if (temp_mn == NULL) exit(1); for (int m = 0; m < output_crb; m += mc) { int actual_mc = fminf(m + mc, output_crb) - m; int idx_mn_base = m * out_channel; 
for (int k = 0; k < kernel_cri; k += kc) { int actual_kc = fminf(k + kc, kernel_cri) - k; int idx_kn_base = k * out_channel; #ifdef AVX_PSIZE int kc_strip = (actual_kc / AVX_PSIZE) * AVX_PSIZE; #endif for (int n = 0; n < out_channel; n += nc) { int actual_nc = fminf(n + nc, out_channel) - n; idx_kn_base += n; idx_mn_base += n; int cnk = 0; for (int ik = 0; ik < actual_kc; ik++) { for (int jn = 0; jn < actual_nc; jn++) { int index_kn = idx_kn_base + ik * out_channel + jn; temp_kn[cnk++] = kernel_ptr[index_kn]; } } int cmn = 0; for (int ix = 0; ix < actual_mc; ix++) { for (int iy = 0; iy < actual_nc; iy++) { int index_mn = idx_mn_base + ix * out_channel + iy; temp_mn[cmn++] = output_ptr[index_mn]; } } GEMM(CblasRowMajor, CblasNoTrans, CblasTrans, actual_mc, actual_kc, actual_nc, ALPHA, temp_mn, actual_nc, temp_kn, actual_nc, BETA, temp_mk, actual_kc); int cmk = 0; for (int im = 0; im < actual_mc; im += 1) { int b = (m + im) / output_cr; int cr = (m + im) - b * output_cr; int c = cr / output_rows; int r = cr - c * output_rows; const int cstart = c * col_stride - pc; const int rstart = r * row_stride - pr; int idx_mk_base = b * input_cri; #ifdef AVX_PSIZE if (fast_flag) { ACX_FUN_LOAD (load_sub_matrix_fast, spatial) ( input_ptr, temp_mk, &cmk, kc_strip, k, kernel_ri, input_ri, in_channel, idx_mk_base, cstart, rstart, input_cols, input_rows, 1); } else { ACX_FUN_LOAD (load_sub_matrix, spatial) ( input_ptr, temp_mk, &cmk, kc_strip, actual_kc, k, kernel_ri, input_ri, in_channel, idx_mk_base, cstart, rstart, input_cols, input_rows, kernel_rows, 1); } #else for (int ik = 0; ik < actual_kc; ik += 1) { int kc = (k + ik) / kernel_ri; int kri = (k + ik) - kc * kernel_ri; int kr = kri / in_channel; int ki = kri - kr * in_channel; int input_col = kc + cstart; int input_row = kr + rstart; if (input_col < input_cols && input_col >= 0 && input_row < input_rows && input_row >= 0) { int input_index = idx_mk_base + input_col * input_ri + input_row * in_channel + ki; input_ptr[input_index] += temp_mk[cmk]; } cmk++; } #endif } } } } free(temp_mk); free(temp_kn); free(temp_mn); return Val_unit; } CAMLprim value FUN_BYTE (spatial_backward_input) (value * argv, int argn) { return FUN_NATIVE (spatial_backward_input) ( argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15] ); } CAMLprim value FUN_NATIVE (spatial_backward_kernel) ( value vInput_ptr, value vKernel_ptr, value vOutput_ptr, value vBatches, value vInput_cols, value vInput_rows, value vIn_channel, value vKernel_cols, value vKernel_rows, value vOutput_cols, value vOutput_rows, value vOut_channel, value vRow_stride, value vCol_stride, value vRow_in_stride, value vCol_in_stride ) { struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr); struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr); struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr); TYPE *input_ptr = (TYPE *) IN->data; TYPE *kernel_ptr = (TYPE *) KE->data; TYPE *output_ptr = (TYPE *) OU->data; int batches = Long_val(vBatches); int input_cols = Long_val(vInput_cols); int input_rows = Long_val(vInput_rows); int in_channel = Long_val(vIn_channel); int kernel_cols = Long_val(vKernel_cols); int kernel_rows = Long_val(vKernel_rows); int output_cols = Long_val(vOutput_cols); int output_rows = Long_val(vOutput_rows); int out_channel = Long_val(vOut_channel); int row_stride = Long_val(vRow_stride); int col_stride = Long_val(vCol_stride); int row_in_stride = Long_val(vRow_in_stride); int col_in_stride = 
Long_val(vCol_in_stride); const int input_cri = in_channel * input_rows * input_cols; const int input_ri = in_channel * input_rows; const int kernel_rio = out_channel * in_channel * kernel_rows; const int output_ri = out_channel * output_rows; const int output_cr = output_rows * output_cols; const int output_crb = output_rows * output_cols * batches; const int kernel_cri = kernel_cols * kernel_rows * in_channel; const int kernel_ri = kernel_rows * in_channel; int pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2; int pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2; if (pr < 0) pr = 0; if (pc < 0) pc = 0; memset(kernel_ptr, 0, kernel_cols * kernel_rio * sizeof(TYPE)); INIT; if (kernel_cri * output_crb < IM2COL_THRESHOLD) { TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE)); if (inpt2d == NULL) exit(1); TYPE *kern2d = (TYPE *) calloc(kernel_cri * out_channel, sizeof(TYPE)); if (kern2d == NULL) exit(1); for (int i = 0; i < output_crb; ++i) { int bt = i / output_cr; int cr = i % output_cr; int c = cr / output_rows; int r = cr % output_rows; const int cstart = c * col_stride - pc; const int rstart = r * row_stride - pr; const int cend = cstart + kernel_cols; const int rend = rstart + kernel_rows; const int input_idx_base = bt * input_cri; int cnt = 0; for (int a = cstart; a < cend; ++a) { for (int b = rstart; b < rend; ++b) { for (int h = 0; h < in_channel; ++h) { if (a < input_cols && a >= 0 && b < input_rows && b >= 0) { int input_idx = input_idx_base + a * input_ri + b * in_channel + h; inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx]; } ++cnt; } } } } GEMM(CblasRowMajor, CblasTrans, CblasNoTrans, out_channel, kernel_cri, output_crb, ALPHA, output_ptr, out_channel, inpt2d, kernel_cri, BETA, kern2d, kernel_cri); int cnt = 0; for (int j = 0; j < kernel_cri; ++j) { for (int i = 0; i < out_channel; ++i) { kernel_ptr[cnt++] = kern2d[i * kernel_cri + j]; } } free(inpt2d); free(kern2d); return Val_unit; } int mc = output_crb; int kc = kernel_cri; int nc = out_channel; compute_block_sizes(&mc, &kc, &nc, sizeof(TYPE)); #ifdef AVX_PSIZE int fast_flag = (in_channel % AVX_PSIZE == 0); TYPE *temp_mk = NULL; if (posix_memalign((void**) &temp_mk, ALIGN_SIZE, mc * kc * sizeof(TYPE))) exit(1); #else TYPE *temp_mk = (TYPE *) calloc(mc * kc, sizeof(TYPE)); if (temp_mk == NULL) exit(1); #endif TYPE *temp_kn = (TYPE *) calloc(nc * kc, sizeof(TYPE)); if (temp_kn == NULL) exit(1); TYPE *temp_mn = (TYPE *) calloc(mc * nc, sizeof(TYPE)); if (temp_mn == NULL) exit(1); for (int m = 0; m < output_crb; m += mc) { int actual_mc = fminf(m + mc, output_crb) - m; int idx_mn_base = m * out_channel; for (int k = 0; k < kernel_cri; k += kc) { int actual_kc = fminf(k + kc, kernel_cri) - k; int idx_kn_base = k * out_channel; memset(temp_mk, 0, mc * kc * sizeof(TYPE)); #ifdef AVX_PSIZE int kc_strip = (actual_kc / AVX_PSIZE) * AVX_PSIZE; #endif int cmk = 0; for (int im = 0; im < actual_mc; im += 1) { int b = (m + im) / output_cr; int cr = (m + im) - b * output_cr; int c = cr / output_rows; int r = cr - c * output_rows; const int cstart = c * col_stride - pc; const int rstart = r * row_stride - pr; const int idx_mk_base = b * input_cri; #ifdef AVX_PSIZE if (fast_flag) { ACX_FUN_LOAD (load_sub_matrix_fast, spatial) ( input_ptr, temp_mk, &cmk, kc_strip, k, kernel_ri, input_ri, in_channel, idx_mk_base, cstart, rstart, input_cols, input_rows, 0); } else { ACX_FUN_LOAD (load_sub_matrix, spatial) ( input_ptr, temp_mk, &cmk, kc_strip, actual_kc, k, kernel_ri, input_ri, 
            in_channel, idx_mk_base, cstart, rstart, input_cols, input_rows,
            kernel_rows, 0);
        }
#else
        for (int ik = 0; ik < actual_kc; ik += 1) {
          int kc = (k + ik) / kernel_ri;
          int kri = (k + ik) - kc * kernel_ri;
          int kr = kri / in_channel;
          int ki = kri - kr * in_channel;
          int input_col = kc + cstart;
          int input_row = kr + rstart;
          if (input_col < input_cols && input_col >= 0 &&
              input_row < input_rows && input_row >= 0) {
            int input_index = idx_mk_base + input_col * input_ri
              + input_row * in_channel + ki;
            temp_mk[cmk] = input_ptr[input_index];
          }
          cmk++;
        }
#endif
      }

      for (int n = 0; n < out_channel; n += nc) {
        int actual_nc = fminf(n + nc, out_channel) - n;
        idx_mn_base += n;
        idx_kn_base += n;

        int cmn = 0;
        for (int ix = 0; ix < actual_mc; ix++) {
          for (int iy = 0; iy < actual_nc; iy++) {
            int index_mn = idx_mn_base + ix * out_channel + iy;
            temp_mn[cmn++] = output_ptr[index_mn];
          }
        }

        /* temp_kn = temp_mn^T * temp_mk for this (n, k) tile */
        memset(temp_kn, 0, nc * kc * sizeof(TYPE));
        GEMM(CblasRowMajor, CblasTrans, CblasNoTrans,
          actual_nc, actual_kc, actual_mc, ALPHA,
          temp_mn, actual_nc, temp_mk, actual_kc,
          BETA, temp_kn, actual_kc);

        int cnk = 0;
        for (int jn = 0; jn < actual_nc; jn++) {
          for (int ik = 0; ik < actual_kc; ik++) {
            int index_kn = idx_kn_base + ik * out_channel + jn;
            kernel_ptr[index_kn] = temp_kn[cnk++];
          }
        }
      }
    }
  }

  free(temp_mk);
  free(temp_kn);
  free(temp_mn);

  return Val_unit;
}

CAMLprim value FUN_BYTE (spatial_backward_kernel) (value * argv, int argn) {
  return FUN_NATIVE (spatial_backward_kernel) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
    argv[15]
  );
}


/*
 * im2col implementation
 */

CAMLprim value FUN_NATIVE (spatial_im2col) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr, value vBatches,
  value vInput_cols, value vInput_rows, value vIn_channel, value vKernel_cols,
  value vKernel_rows, value vOutput_cols, value vOutput_rows,
  value vOut_channel, value vRow_stride, value vCol_stride, value vPadding,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;

  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int padding = Long_val(vPadding);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);

  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int output_cri = out_channel * output_rows * output_cols;
  const int output_cr = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;

  TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));

  INIT;

  /* SAME padding: e.g. a 3x3 kernel at stride 1 with equal input and
     output size gives pr = pc = 1. */
  int pr = 0, pc = 0;
  if (padding != 1) {
    pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
    pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
    if (pr < 0) pr = 0;
    if (pc < 0) pc = 0;
  }

  /* Lower the input into a (output_crb x kernel_cri) patch matrix;
     out-of-range taps keep the zero from calloc. */
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
  for (int i = 0; i < output_crb; ++i) {
    int bt = i / output_cr;
    int cr = i % output_cr;
    int c = cr / output_rows;
    int r = cr % output_rows;
    const int cstart = c * col_stride - pc;
    const int rstart = r * row_stride - pr;
    const int cend = cstart + kernel_cols;
    const int rend = rstart + kernel_rows;
    const int input_idx_base = bt * input_cri;
    int cnt = 0;
    for (int a = cstart; a < cend; ++a) {
      for (int b = rstart; b < rend; ++b) {
        for (int h = 0; h < in_channel; ++h) {
          if (a < input_cols && a >= 0 && b < input_rows && b >= 0) {
            int input_idx = input_idx_base + a * input_ri + b * in_channel + h;
            inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx];
          }
          ++cnt;
        }
      }
    }
  }

  /* One GEMM computes every output pixel:
     (output_crb x kernel_cri) * (kernel_cri x out_channel). */
  GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
    output_crb, out_channel, kernel_cri, ALPHA,
    inpt2d, kernel_cri, kernel_ptr, out_channel,
    BETA, output_ptr, out_channel);

  free(inpt2d);

  return Val_unit;
}

CAMLprim value FUN_BYTE (spatial_im2col) (value * argv, int argn) {
  return FUN_NATIVE (spatial_im2col) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
    argv[15], argv[16]
  );
}

CAMLprim value FUN_NATIVE (spatial_backward_kernel_im2col) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr, value vBatches,
  value vInput_cols, value vInput_rows, value vIn_channel, value vKernel_cols,
  value vKernel_rows, value vOutput_cols, value vOutput_rows,
  value vOut_channel, value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;

  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);

  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int kernel_rio = out_channel * in_channel * kernel_rows;
  const int output_ri = out_channel * output_rows;
  const int output_cr = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;

  INIT;

  TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc(kernel_cri * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  memset(kernel_ptr, 0, kernel_cols * kernel_rio * sizeof(TYPE));

  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  if (pr < 0) pr = 0;
  if (pc < 0) pc = 0;

#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
  for (int i = 0; i < output_crb; ++i) {
    int bt = i / output_cr;
    int cr = i % output_cr;
    int c = cr / output_rows;
    int r = cr % output_rows;
    const int cstart = c *
col_stride - pc; const int rstart = r * row_stride - pr; const int cend = cstart + kernel_cols; const int rend = rstart + kernel_rows; const int input_idx_base = bt * input_cri; int cnt = 0; for (int a = cstart; a < cend; ++a) { for (int b = rstart; b < rend; ++b) { for (int h = 0; h < in_channel; ++h) { if (a < input_cols && a >= 0 && b < input_rows && b >= 0) { int input_idx = input_idx_base + a * input_ri + b * in_channel + h; inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx]; } ++cnt; } } } } GEMM(CblasRowMajor, CblasTrans, CblasNoTrans, out_channel, kernel_cri, output_crb, ALPHA, output_ptr, out_channel, inpt2d, kernel_cri, BETA, kern2d, kernel_cri); int cnt = 0; for (int j = 0; j < kernel_cri; ++j) { for (int i = 0; i < out_channel; ++i) { kernel_ptr[cnt++] = kern2d[i * kernel_cri + j]; } } free(inpt2d); free(kern2d); return Val_unit; } CAMLprim value FUN_BYTE (spatial_backward_kernel_im2col) (value * argv, int argn) { return FUN_NATIVE (spatial_backward_kernel_im2col) ( argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15] ); } CAMLprim value FUN_NATIVE (spatial_backward_input_im2col) ( value vInput_ptr, value vKernel_ptr, value vOutput_ptr, value vBatches, value vInput_cols, value vInput_rows, value vIn_channel, value vKernel_cols, value vKernel_rows, value vOutput_cols, value vOutput_rows, value vOut_channel, value vRow_stride, value vCol_stride, value vRow_in_stride, value vCol_in_stride ) { struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr); struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr); struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr); TYPE *input_ptr = (TYPE *) IN->data; TYPE *kernel_ptr = (TYPE *) KE->data; TYPE *output_ptr = (TYPE *) OU->data; int batches = Long_val(vBatches); int input_cols = Long_val(vInput_cols); int input_rows = Long_val(vInput_rows); int in_channel = Long_val(vIn_channel); int kernel_cols = Long_val(vKernel_cols); int kernel_rows = Long_val(vKernel_rows); int output_cols = Long_val(vOutput_cols); int output_rows = Long_val(vOutput_rows); int out_channel = Long_val(vOut_channel); int row_stride = Long_val(vRow_stride); int col_stride = Long_val(vCol_stride); int row_in_stride = Long_val(vRow_in_stride); int col_in_stride = Long_val(vCol_in_stride); const int input_cri = in_channel * input_rows * input_cols; const int input_ri = in_channel * input_rows; const int output_ri = out_channel * output_rows; const int output_cr = output_rows * output_cols; const int output_crb = output_rows * output_cols * batches; const int kernel_cri = kernel_cols * kernel_rows * in_channel; TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE)); if (inpt2d == NULL) exit(1); memset(input_ptr, 0, batches * input_cri * sizeof(TYPE)); INIT; int pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2; int pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2; if (pr < 0) pr = 0; if (pc < 0) pc = 0; GEMM(CblasRowMajor, CblasNoTrans, CblasTrans, output_crb, kernel_cri, out_channel, ALPHA, output_ptr, out_channel, kernel_ptr, out_channel, BETA, inpt2d, kernel_cri); for (int i = 0; i < output_crb; ++i) { int bt = i / output_cr; int cr = i % output_cr; int c = cr / output_rows; int r = cr % output_rows; const int cstart = c * col_stride - pc; const int rstart = r * row_stride - pr; const int cend = cstart + kernel_cols; const int rend = rstart + kernel_rows; const int input_idx_base = bt * input_cri; int cnt = 0; for 
(int a = cstart; a < cend; ++a) {
      for (int b = rstart; b < rend; ++b) {
        for (int h = 0; h < in_channel; ++h) {
          if (a < input_cols && a >= 0 && b < input_rows && b >= 0) {
            int input_idx = input_idx_base + a * input_ri + b * in_channel + h;
            input_ptr[input_idx] += inpt2d[i * kernel_cri + cnt];
          }
          ++cnt;
        }
      }
    }
  }

  free(inpt2d);

  return Val_unit;
}

CAMLprim value FUN_BYTE (spatial_backward_input_im2col) (value * argv, int argn) {
  return FUN_NATIVE (spatial_backward_input_im2col) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
    argv[15]
  );
}

CAMLprim value FUN_NATIVE (cuboid_im2col) (
  value vInput, value vKernel, value vOutput, value vBatches,
  value vInput_cols, value vInput_rows, value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows, value vOutput_dpts,
  value vOut_channel, value vDpt_stride, value vRow_stride,
  value vCol_stride, value vPadding
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;

  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int padding = Long_val(vPadding);

  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;

  TYPE *inpt2d = (TYPE *) calloc(kernel_idrc * output_drcb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  memset(output_ptr, 0, batches * output_crdo * sizeof(TYPE));

  INIT;

  int pd = 0, pr = 0, pc = 0;
  if (padding != 1) {
    pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
    pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
    pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
    if (pc < 0) pc = 0;
    if (pr < 0) pr = 0;
    if (pd < 0) pd = 0;
  }

  /* Lower every 3d patch into one row of inpt2d; taps outside the input
     keep the zero from calloc. */
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
  for (int i = 0; i < output_drcb; ++i) {
    int bt = i / output_drc;
    int jkd = i % output_drc;
    int j = jkd / output_dr;
    int kd = jkd % output_dr;
    int k = kd / output_dpts;
    int d = kd % output_dpts;
    const int cstart = j * col_stride - pc;
    const int rstart = k * row_stride - pr;
    const int dstart = d * dpt_stride - pd;
    const int cend = cstart + kernel_cols;
    const int rend = rstart + kernel_rows;
    const int dend = dstart + kernel_dpts;
    const int input_idx_base = bt * input_crdi;
    int cnt = 0;
    for (int a = cstart; a < cend; ++a) {
      for (int b = rstart; b < rend; ++b) {
        for (int c = dstart; c < dend; ++c) {
          for (int h = 0; h < in_channel; ++h) {
            if (a >= 0 && a < input_cols && b >= 0 && b < input_rows &&
                c >= 0 && c < input_dpts) {
              int input_idx = input_idx_base + a * input_rdi + b * input_di
                + c * in_channel + h;
              inpt2d[i * kernel_idrc + cnt] = input_ptr[input_idx];
            }
            ++cnt;
          }
        }
      }
    }
  }

  /* (output_drcb x kernel_idrc) * (kernel_idrc x out_channel) in one call. */
  GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
    output_drcb, out_channel, kernel_idrc, ALPHA,
    inpt2d, kernel_idrc, kernel_ptr, out_channel,
    BETA, output_ptr, out_channel);

  free(inpt2d);

  return Val_unit;
}

CAMLprim value FUN_BYTE (cuboid_im2col) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_im2col) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
    argv[15], argv[16], argv[17], argv[18]
  );
}

CAMLprim value FUN_NATIVE (cuboid_backward_kernel_im2col) (
  value vInput, value vKernel, value vOutput, value vBatches,
  value vInput_cols, value vInput_rows, value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows, value vOutput_dpts,
  value vOut_channel, value vDpt_stride, value vRow_stride, value vCol_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;

  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);

  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int kernel_rdio = out_channel * in_channel * kernel_dpts * kernel_rows;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;

  INIT;

  TYPE *inpt2d = (TYPE *) calloc(kernel_idrc * output_drcb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc(kernel_idrc * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  memset(kernel_ptr, 0, kernel_cols * kernel_rdio * sizeof(TYPE));

  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;

#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
  for (int i = 0; i < output_drcb; ++i) {
    int bt = i / output_drc;
    int jkd = i % output_drc;
    int j = jkd / output_dr;
    int kd = jkd % output_dr;
    int k = kd / output_dpts;
    int d = kd % output_dpts;
    const int cstart = j * col_stride - pc;
    const int
rstart = k * row_stride - pr; const int dstart = d * dpt_stride - pd; const int cend = cstart + kernel_cols; const int rend = rstart + kernel_rows; const int dend = dstart + kernel_dpts; const int input_idx_base = bt * input_crdi; int cnt = 0; for (int a = cstart; a < cend; ++a) { for (int b = rstart; b < rend; ++b) { for (int c = dstart; c < dend; ++c) { for (int h = 0; h < in_channel; ++h) { if (a >= 0 && a < input_cols && b >= 0 && b < input_rows && c >= 0 && c < input_dpts) { int input_idx = input_idx_base + a * input_rdi + b * input_di + c * in_channel + h; inpt2d[i * kernel_idrc + cnt] = input_ptr[input_idx]; } ++cnt; } } } } } GEMM(CblasRowMajor, CblasTrans, CblasNoTrans, out_channel, kernel_idrc, output_drcb, ALPHA, output_ptr, out_channel, inpt2d, kernel_idrc, BETA, kern2d, kernel_idrc); int cnt = 0; for (int j = 0; j < kernel_idrc; ++j) { for (int i = 0; i < out_channel; ++i) { kernel_ptr[cnt++] = kern2d[i * kernel_idrc + j]; } } free(inpt2d); free(kern2d); return Val_unit; } CAMLprim value FUN_BYTE (cuboid_backward_kernel_im2col) (value * argv, int argn) { return FUN_NATIVE (cuboid_backward_kernel_im2col) ( argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15], argv[16], argv[17] ); } CAMLprim value FUN_NATIVE (cuboid_backward_input_im2col) ( value vInput, value vKernel, value vOutput, value vBatches, value vInput_cols, value vInput_rows, value vInput_dpts, value vIn_channel, value vKernel_cols, value vKernel_rows, value vKernel_dpts, value vOutput_cols, value vOutput_rows, value vOutput_dpts, value vOut_channel, value vDpt_stride, value vRow_stride, value vCol_stride ) { struct caml_ba_array *IN = Caml_ba_array_val(vInput); struct caml_ba_array *KE = Caml_ba_array_val(vKernel); struct caml_ba_array *OU = Caml_ba_array_val(vOutput); TYPE *input_ptr = (TYPE *) IN->data; TYPE *kernel_ptr = (TYPE *) KE->data; TYPE *output_ptr = (TYPE *) OU->data; int batches = Long_val(vBatches); int input_cols = Long_val(vInput_cols); int input_rows = Long_val(vInput_rows); int input_dpts = Long_val(vInput_dpts); int in_channel = Long_val(vIn_channel); int kernel_cols = Long_val(vKernel_cols); int kernel_rows = Long_val(vKernel_rows); int kernel_dpts = Long_val(vKernel_dpts); int output_cols = Long_val(vOutput_cols); int output_rows = Long_val(vOutput_rows); int output_dpts = Long_val(vOutput_dpts); int out_channel = Long_val(vOut_channel); int dpt_stride = Long_val(vDpt_stride); int row_stride = Long_val(vRow_stride); int col_stride = Long_val(vCol_stride); const int input_crdi = in_channel * input_dpts * input_rows * input_cols; const int input_rdi = in_channel * input_dpts * input_rows; const int input_di = in_channel * input_dpts; const int output_dr = output_dpts * output_rows; const int output_drc = output_dpts * output_rows * output_cols; const int output_drcb = output_dpts * output_rows * output_cols * batches; const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols; TYPE *inpt2d = (TYPE *) calloc(kernel_idrc * output_drcb, sizeof(TYPE)); if (inpt2d == NULL) exit(1); memset(input_ptr, 0, batches * input_crdi * sizeof(TYPE)); INIT; int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2; int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2; int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2; if (pc < 0) pc = 0; if (pr < 0) pr = 0; if (pd < 0) pd = 0; GEMM(CblasRowMajor, CblasNoTrans, CblasTrans, output_drcb, 
kernel_idrc, out_channel, ALPHA, output_ptr, out_channel, kernel_ptr, out_channel, BETA, inpt2d, kernel_idrc); for (int i = 0; i < output_drcb; ++i) { int bt = i / output_drc; int jkd = i % output_drc; int j = jkd / output_dr; int kd = jkd % output_dr; int k = kd / output_dpts; int d = kd % output_dpts; const int cstart = j * col_stride - pc; const int rstart = k * row_stride - pr; const int dstart = d * dpt_stride - pd; const int cend = cstart + kernel_cols; const int rend = rstart + kernel_rows; const int dend = dstart + kernel_dpts; const int input_idx_base = bt * input_crdi; int cnt = 0; for (int a = cstart; a < cend; ++a) { for (int b = rstart; b < rend; ++b) { for (int c = dstart; c < dend; ++c) { for (int h = 0; h < in_channel; ++h) { if (a >= 0 && a < input_cols && b >= 0 && b < input_rows && c >= 0 && c < input_dpts) { int input_idx = input_idx_base + a * input_rdi + b * input_di + c * in_channel + h; input_ptr[input_idx] += inpt2d[i * kernel_idrc + cnt]; } ++cnt; } } } } } free(inpt2d); return Val_unit; } CAMLprim value FUN_BYTE (cuboid_backward_input_im2col) (value * argv, int argn) { return FUN_NATIVE (cuboid_backward_input_im2col) ( argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15], argv[16], argv[17] ); } /* * memory-efficient implementation */ CAMLprim value FUN_NATIVE (spatial_mec) ( value vInput_ptr, value vKernel_ptr, value vOutput_ptr, value vBatches, value vInput_cols, value vInput_rows, value vIn_channel, value vKernel_cols, value vKernel_rows, value vOutput_cols, value vOutput_rows, value vOut_channel, value vRow_stride, value vCol_stride, value vPadding, value vRow_in_stride, value vCol_in_stride ) { struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr); struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr); struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr); TYPE *input_ptr = (TYPE *) IN->data; TYPE *kernel_ptr = (TYPE *) KE->data; TYPE *output_ptr = (TYPE *) OU->data; int batches = Long_val(vBatches); int input_cols = Long_val(vInput_cols); int input_rows = Long_val(vInput_rows); int in_channel = Long_val(vIn_channel); int kernel_cols = Long_val(vKernel_cols); int kernel_rows = Long_val(vKernel_rows); int output_cols = Long_val(vOutput_cols); int output_rows = Long_val(vOutput_rows); int out_channel = Long_val(vOut_channel); int row_stride = Long_val(vRow_stride); int col_stride = Long_val(vCol_stride); int padding = Long_val(vPadding); int row_in_stride = Long_val(vRow_in_stride); int col_in_stride = Long_val(vCol_in_stride); const int input_cri = in_channel * input_rows * input_cols; const int input_ri = input_rows * in_channel; const int output_cri = out_channel * output_rows * output_cols; const int kernel_cri = kernel_cols * kernel_rows * in_channel; const int kernel_rio = kernel_rows * in_channel * out_channel; const int kernel_io = in_channel * out_channel; const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride; const int output_bco = out_channel * output_cols * batches; const int inpt2d_cols = padded_input_rows * kernel_cols * in_channel; const int inpt2d_rows = batches * output_cols; const int inpt2d_step = inpt2d_rows * kernel_cols * in_channel * row_stride; TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE)); if (inpt2d == NULL) exit(1); TYPE *kern2d = (TYPE *) calloc(kernel_cri * out_channel, sizeof(TYPE)); if (kern2d == NULL) exit(1); TYPE *output2d = (TYPE *) calloc(batches * output_cri, 
sizeof(TYPE)); if (output2d == NULL) exit(1); memset(output_ptr, 0, batches * output_cri * sizeof(TYPE)); INIT; int pr = 0, pc = 0; if (padding != 1) { pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2; pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2; if (pr < 0) pr = 0; if (pc < 0) pc = 0; } int cnt = 0; int kidx = 0; for (int o = 0; o < out_channel; ++o) { for (int r = 0; r < kernel_rows; ++r) { for (int c = 0; c < kernel_cols; ++c) { for (int i = 0; i < in_channel; ++i) { kidx = c * kernel_rio + r * kernel_io + i * out_channel + o; kern2d[cnt++] = kernel_ptr[kidx]; } } } } for (int i = 0; i < inpt2d_rows; ++i) { int bt = i / output_cols; int c = i % output_cols; const int cstart = c * col_stride - pc; const int cend = cstart + kernel_cols; const int rstart = 0 - pr; const int rend = rstart + padded_input_rows; int counter = 0; for (int a = rstart; a < rend; ++a) { for (int b = cstart; b < cend; ++b) { for (int h = 0; h < in_channel; ++h) { if (b < input_cols && b >= 0 && a < input_rows && a >= 0) { int input_idx = bt * input_cri + b * input_ri + a * in_channel + h; inpt2d[counter * inpt2d_rows + i] = input_ptr[input_idx]; } counter++; } } } } for (int i = 0; i < output_rows; ++i) { GEMM(CblasColMajor, CblasNoTrans, CblasNoTrans, inpt2d_rows, out_channel, kernel_cri, ALPHA, inpt2d + inpt2d_step * i, inpt2d_rows, kern2d, kernel_cri, BETA, output2d + output_bco * i, inpt2d_rows); } cnt = 0; for (int j = 0; j < inpt2d_rows; ++j) { for (int i = 0; i < output_rows * out_channel; ++i) { output_ptr[cnt++] = output2d[i * inpt2d_rows + j]; } } free(inpt2d); free(kern2d); free(output2d); return Val_unit; } CAMLprim value FUN_BYTE (spatial_mec) (value * argv, int argn) { return FUN_NATIVE (spatial_mec) ( argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15], argv[16] ); } CAMLprim value FUN_NATIVE (spatial_backward_kernel_mec) ( value vInput_ptr, value vKernel_ptr, value vOutput_ptr, value vBatches, value vInput_cols, value vInput_rows, value vIn_channel, value vKernel_cols, value vKernel_rows, value vOutput_cols, value vOutput_rows, value vOut_channel, value vRow_stride, value vCol_stride, value vRow_in_stride, value vCol_in_stride ) { struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr); struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr); struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr); TYPE *input_ptr = (TYPE *) IN->data; TYPE *kernel_ptr = (TYPE *) KE->data; TYPE *output_ptr = (TYPE *) OU->data; int batches = Long_val(vBatches); int input_cols = Long_val(vInput_cols); int input_rows = Long_val(vInput_rows); int in_channel = Long_val(vIn_channel); int kernel_cols = Long_val(vKernel_cols); int kernel_rows = Long_val(vKernel_rows); int output_cols = Long_val(vOutput_cols); int output_rows = Long_val(vOutput_rows); int out_channel = Long_val(vOut_channel); int row_stride = Long_val(vRow_stride); int col_stride = Long_val(vCol_stride); int row_in_stride = Long_val(vRow_in_stride); int col_in_stride = Long_val(vCol_in_stride); const int input_cri = in_channel * input_rows * input_cols; const int input_ri = in_channel * input_rows; const int output_ri = out_channel * output_rows; const int output_cr = output_rows * output_cols; const int output_ro = output_rows * out_channel; const int output_crb = output_rows * output_cols * batches; const int kernel_io = in_channel * out_channel; const int kernel_rio = kernel_rows * in_channel * out_channel; 
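  /*
   * Shared layout of the mec ("memory-efficient convolution") lowering, the
   * same scheme spatial_mec uses above. This reading is a sketch based on
   * the indexing below, assuming the scheme follows the MEC approach of
   * Cho & Brand: inpt2d is column-major with inpt2d_rows = batches *
   * output_cols rows; each row caches one vertical input strip of width
   * kernel_cols and height padded_input_rows = kernel_rows +
   * (output_rows - 1) * row_stride (for example, a 3-row kernel at
   * row_stride 1 with 3 output rows needs 3 + 2 * 1 = 5 input rows).
   * Output row r then reuses the strip through the offset inpt2d_step * r,
   * i.e. shifted down by r * row_stride input rows, so each output row
   * costs one GEMM instead of a fresh im2col copy.
   */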
const int kernel_cri = kernel_cols * kernel_rows * in_channel; const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride; const int output_bco = out_channel * output_cols * batches; const int inpt2d_cols = padded_input_rows * kernel_cols * in_channel; const int inpt2d_rows = batches * output_cols; const int inpt2d_step = batches * output_cols * kernel_cols * in_channel * row_stride; TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE)); if (inpt2d == NULL) exit(1); TYPE *kern2d = (TYPE *) calloc(kernel_cri * out_channel, sizeof(TYPE)); if (kern2d == NULL) exit(1); TYPE *output2d = (TYPE *) calloc(output_crb * out_channel, sizeof(TYPE)); if (output2d == NULL) exit(1); memset(kernel_ptr, 0, kernel_cols * kernel_rio * sizeof(TYPE)); INIT; int pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2; int pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2; if (pr < 0) pr = 0; if (pc < 0) pc = 0; for (int i = 0; i < inpt2d_rows; ++i) { int bt = i / output_cols; int c = i % output_cols; const int cstart = c * col_stride - pc; const int cend = cstart + kernel_cols; const int rstart = 0 - pr; const int rend = rstart + padded_input_rows; int counter = 0; for (int a = rstart; a < rend; ++a) { for (int b = cstart; b < cend; ++b) { for (int h = 0; h < in_channel; ++h) { if (b < input_cols && b >= 0 && a < input_rows && a >= 0) { int input_idx = bt * input_cri + b * input_ri + a * in_channel + h; inpt2d[counter * inpt2d_rows + i] = input_ptr[input_idx]; } counter++; } } } } int cnt = 0; for (int j = 0; j < inpt2d_rows; ++j) { for (int i = 0; i < output_ro; ++i) { output2d[i * inpt2d_rows + j] = output_ptr[cnt++]; } } for (int i = 0; i < output_rows; ++i) { GEMM(CblasColMajor, CblasTrans, CblasNoTrans, out_channel, kernel_cri, inpt2d_rows, ALPHA, output2d + output_bco * i, inpt2d_rows, inpt2d + inpt2d_step * i, inpt2d_rows, ALPHA, kern2d, out_channel); } cnt = 0; int kidx = 0; for (int r = 0; r < kernel_rows; ++r) { for (int c = 0; c < kernel_cols; ++c) { for (int i = 0; i < in_channel; ++i) { for (int o = 0; o < out_channel; ++o) { kidx = c * kernel_rio + r * kernel_io + i * out_channel + o; kernel_ptr[kidx] = kern2d[cnt++]; } } } } free(inpt2d); free(kern2d); free(output2d); return Val_unit; } CAMLprim value FUN_BYTE (spatial_backward_kernel_mec) (value * argv, int argn) { return FUN_NATIVE (spatial_backward_kernel_mec) ( argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15] ); } CAMLprim value FUN_NATIVE (spatial_backward_input_mec) ( value vInput_ptr, value vKernel_ptr, value vOutput_ptr, value vBatches, value vInput_cols, value vInput_rows, value vIn_channel, value vKernel_cols, value vKernel_rows, value vOutput_cols, value vOutput_rows, value vOut_channel, value vRow_stride, value vCol_stride, value vRow_in_stride, value vCol_in_stride ) { struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr); struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr); struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr); TYPE *input_ptr = (TYPE *) IN->data; TYPE *kernel_ptr = (TYPE *) KE->data; TYPE *output_ptr = (TYPE *) OU->data; int batches = Long_val(vBatches); int input_cols = Long_val(vInput_cols); int input_rows = Long_val(vInput_rows); int in_channel = Long_val(vIn_channel); int kernel_cols = Long_val(vKernel_cols); int kernel_rows = Long_val(vKernel_rows); int output_cols = Long_val(vOutput_cols); int output_rows = Long_val(vOutput_rows); 
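  /*
   * The *_in_stride arguments are decoded below like every other argument
   * but are never read again in the mec kernels, presumably so that all
   * spatial stubs share one signature; input dilation appears to be
   * handled before these stubs are reached.
   */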
int out_channel = Long_val(vOut_channel); int row_stride = Long_val(vRow_stride); int col_stride = Long_val(vCol_stride); int row_in_stride = Long_val(vRow_in_stride); int col_in_stride = Long_val(vCol_in_stride); const int input_cri = in_channel * input_rows * input_cols; const int input_ri = in_channel * input_rows; const int output_ri = out_channel * output_rows; const int output_cr = output_rows * output_cols; const int output_ro = output_rows * out_channel; const int output_crb = output_rows * output_cols * batches; const int kernel_io = in_channel * out_channel; const int kernel_rio = kernel_rows * in_channel * out_channel; const int kernel_cri = kernel_cols * kernel_rows * in_channel; const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride; const int output_bco = out_channel * output_cols * batches; const int inpt2d_cols = padded_input_rows * kernel_cols * in_channel; const int inpt2d_rows = batches * output_cols; const int inpt2d_step = batches * output_cols * kernel_cols * in_channel * row_stride; TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE)); if (inpt2d == NULL) exit(1); TYPE *kern2d = (TYPE *) calloc(kernel_cri * out_channel, sizeof(TYPE)); if (kern2d == NULL) exit(1); TYPE *output2d = (TYPE *) calloc(output_crb * out_channel, sizeof(TYPE)); if (output2d == NULL) exit(1); memset(input_ptr, 0, batches * input_cri * sizeof(TYPE)); INIT; int pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2; int pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2; if (pr < 0) pr = 0; if (pc < 0) pc = 0; int cnt = 0; for (int j = 0; j < inpt2d_rows; ++j) { for (int i = 0; i < output_ro; ++i) { output2d[i * inpt2d_rows + j] = output_ptr[cnt++]; } } cnt = 0; int kidx = 0; for (int o = 0; o < out_channel; ++o) { for (int r = 0; r < kernel_rows; ++r) { for (int c = 0; c < kernel_cols; ++c) { for (int i = 0; i < in_channel; ++i) { kidx = c * kernel_rio + r * kernel_io + i * out_channel + o; kern2d[cnt++] = kernel_ptr[kidx]; } } } } for (int i = 0; i < output_rows; ++i) { GEMM(CblasColMajor, CblasNoTrans, CblasTrans, inpt2d_rows, kernel_cri, out_channel, ALPHA, output2d + output_bco * i, inpt2d_rows, kern2d, kernel_cri, ALPHA, inpt2d + inpt2d_step * i, inpt2d_rows); } for (int i = 0; i < inpt2d_rows; ++i) { int bt = i / output_cols; int c = i % output_cols; const int cstart = c * col_stride - pc; const int cend = cstart + kernel_cols; const int rstart = 0 - pr; const int rend = rstart + padded_input_rows; const int input_idx_base = bt * input_cri; int counter = 0; for (int a = rstart; a < rend; ++a) { for (int b = cstart; b < cend; ++b) { for (int h = 0; h < in_channel; ++h) { if (b < input_cols && b >= 0 && a < input_rows && a >= 0) { int input_idx = input_idx_base + b * input_ri + a * in_channel + h; input_ptr[input_idx] += inpt2d[counter * inpt2d_rows + i]; } counter++; } } } } free(inpt2d); free(kern2d); free(output2d); return Val_unit; } CAMLprim value FUN_BYTE (spatial_backward_input_mec) (value * argv, int argn) { return FUN_NATIVE (spatial_backward_input_mec) ( argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15] ); } CAMLprim value FUN_NATIVE (cuboid_mec) ( value vInput, value vKernel, value vOutput, value vBatches, value vInput_cols, value vInput_rows, value vInput_dpts, value vIn_channel, value vKernel_cols, value vKernel_rows, value vKernel_dpts, value vOutput_cols, value vOutput_rows, value vOutput_dpts, value 
vOut_channel, value vDpt_stride, value vRow_stride, value vCol_stride, value vPadding ) { struct caml_ba_array *IN = Caml_ba_array_val(vInput); struct caml_ba_array *KE = Caml_ba_array_val(vKernel); struct caml_ba_array *OU = Caml_ba_array_val(vOutput); TYPE *input_ptr = (TYPE *) IN->data; TYPE *kernel_ptr = (TYPE *) KE->data; TYPE *output_ptr = (TYPE *) OU->data; int batches = Long_val(vBatches); int input_cols = Long_val(vInput_cols); int input_rows = Long_val(vInput_rows); int input_dpts = Long_val(vInput_dpts); int in_channel = Long_val(vIn_channel); int kernel_cols = Long_val(vKernel_cols); int kernel_rows = Long_val(vKernel_rows); int kernel_dpts = Long_val(vKernel_dpts); int output_cols = Long_val(vOutput_cols); int output_rows = Long_val(vOutput_rows); int output_dpts = Long_val(vOutput_dpts); int out_channel = Long_val(vOut_channel); int dpt_stride = Long_val(vDpt_stride); int row_stride = Long_val(vRow_stride); int col_stride = Long_val(vCol_stride); int padding = Long_val(vPadding); const int input_crdi = in_channel * input_dpts * input_rows * input_cols; const int input_rdi = in_channel * input_dpts * input_rows; const int input_di = in_channel * input_dpts; const int output_crdo = out_channel * output_dpts * output_rows * output_cols; const int output_rdo = out_channel * output_dpts * output_rows; const int output_dr = output_dpts * output_rows; const int output_drc = output_dpts * output_rows * output_cols; const int output_drcb = output_dpts * output_rows * output_cols * batches; const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols; const int kernel_rdio = kernel_rows * kernel_dpts * in_channel * out_channel; const int kernel_dio = kernel_dpts * in_channel * out_channel; const int kernel_io = in_channel * out_channel; const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride; const int output_bcdo = out_channel * output_cols * output_dpts * batches; const int inpt2d_cols = padded_input_rows * kernel_cols * kernel_dpts * in_channel; const int inpt2d_rows = batches * output_cols * output_dpts; const int inpt2d_step = inpt2d_rows * kernel_cols * kernel_dpts * in_channel * row_stride; INIT; int pd = 0, pr = 0, pc = 0; if (padding != 1) { pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2; pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2; pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2; if (pc < 0) pc = 0; if (pr < 0) pr = 0; if (pd < 0) pd = 0; } TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE)); if (inpt2d == NULL) exit(1); TYPE *kern2d = (TYPE *) calloc(kernel_idrc * out_channel, sizeof(TYPE)); if (kern2d == NULL) exit(1); TYPE *output2d = (TYPE *) calloc(output_drcb * out_channel, sizeof(TYPE)); if (output2d == NULL) exit(1); memset(output_ptr, 0, output_drcb * out_channel * sizeof(TYPE)); int cnt = 0; int kidx = 0; for (int o = 0; o < out_channel; ++o) { for (int r = 0; r < kernel_rows; ++r) { for (int c = 0; c < kernel_cols; ++c) { for (int d = 0; d < kernel_dpts; ++d) { for (int i = 0; i < in_channel; ++i) { kidx = c * kernel_rdio + r * kernel_dio + d * kernel_io + i * out_channel + o; kern2d[cnt++] = kernel_ptr[kidx]; } } } } } const int rstart = 0 - pr; const int rend = rstart + padded_input_rows; for (int i = 0; i < inpt2d_rows; ++i) { int bt = i / (output_cols * output_dpts); int cd = i % (output_cols * output_dpts); int ct = cd / output_dpts; int dt = cd % output_dpts; const int cstart = ct * col_stride - pc; const int dstart = dt * dpt_stride - 
pd; const int cend = cstart + kernel_cols; const int dend = dstart + kernel_dpts; const int input_idx_base = bt * input_crdi; int cnt = 0; for (int r = rstart; r < rend; ++r) { for (int c = cstart; c < cend; ++c) { for (int d = dstart; d < dend; ++d) { for (int h = 0; h < in_channel; ++h) { if (c >= 0 && c < input_cols && r >= 0 && r < input_rows && d >= 0 && d < input_dpts) { int input_idx = input_idx_base + c * input_rdi + r * input_di + d * in_channel + h; inpt2d[cnt * inpt2d_rows + i] += input_ptr[input_idx]; } ++cnt; } } } } } for (int i = 0; i < output_rows; ++i) { GEMM(CblasColMajor, CblasNoTrans, CblasNoTrans, inpt2d_rows, out_channel, kernel_idrc, ALPHA, inpt2d + inpt2d_step * i, inpt2d_rows, kern2d, kernel_idrc, BETA, output2d + output_bcdo * i, inpt2d_rows); } cnt = 0; int oidx = 0; for (int r = 0; r < output_rows; ++r) { for (int o = 0; o < out_channel; ++o) { for (int b = 0; b < batches; ++b) { for (int c = 0; c < output_cols; ++c) { for (int d = 0; d < output_dpts; ++d) { oidx = b * output_crdo + c * output_rdo + r * output_dpts * out_channel + d * out_channel + o; output_ptr[oidx] = output2d[cnt++]; } } } } } free(inpt2d); free(kern2d); free(output2d); return Val_unit; } CAMLprim value FUN_BYTE (cuboid_mec) (value * argv, int argn) { return FUN_NATIVE (cuboid_mec) ( argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15], argv[16], argv[17], argv[18] ); } CAMLprim value FUN_NATIVE (cuboid_backward_kernel_mec) ( value vInput, value vKernel, value vOutput, value vBatches, value vInput_cols, value vInput_rows, value vInput_dpts, value vIn_channel, value vKernel_cols, value vKernel_rows, value vKernel_dpts, value vOutput_cols, value vOutput_rows, value vOutput_dpts, value vOut_channel, value vDpt_stride, value vRow_stride, value vCol_stride ) { struct caml_ba_array *IN = Caml_ba_array_val(vInput); struct caml_ba_array *KE = Caml_ba_array_val(vKernel); struct caml_ba_array *OU = Caml_ba_array_val(vOutput); TYPE *input_ptr = (TYPE *) IN->data; TYPE *kernel_ptr = (TYPE *) KE->data; TYPE *output_ptr = (TYPE *) OU->data; int batches = Long_val(vBatches); int input_cols = Long_val(vInput_cols); int input_rows = Long_val(vInput_rows); int input_dpts = Long_val(vInput_dpts); int in_channel = Long_val(vIn_channel); int kernel_cols = Long_val(vKernel_cols); int kernel_rows = Long_val(vKernel_rows); int kernel_dpts = Long_val(vKernel_dpts); int output_cols = Long_val(vOutput_cols); int output_rows = Long_val(vOutput_rows); int output_dpts = Long_val(vOutput_dpts); int out_channel = Long_val(vOut_channel); int dpt_stride = Long_val(vDpt_stride); int row_stride = Long_val(vRow_stride); int col_stride = Long_val(vCol_stride); const int input_crdi = in_channel * input_dpts * input_rows * input_cols; const int input_rdi = in_channel * input_dpts * input_rows; const int input_di = in_channel * input_dpts; const int output_crdo = out_channel * output_dpts * output_rows * output_cols; const int output_rdo = out_channel * output_dpts * output_rows; const int output_dr = output_dpts * output_rows; const int output_drc = output_dpts * output_rows * output_cols; const int output_drcb = output_dpts * output_rows * output_cols * batches; const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols; const int kernel_rdio = kernel_rows * kernel_dpts * in_channel * out_channel; const int kernel_dio = kernel_dpts * in_channel * out_channel; const int kernel_io = in_channel * out_channel; const int 
padded_input_rows = kernel_rows + (output_rows - 1) * row_stride; const int output_bcdo = out_channel * output_cols * output_dpts * batches; const int inpt2d_cols = padded_input_rows * kernel_cols * kernel_dpts * in_channel; const int inpt2d_rows = batches * output_cols * output_dpts; const int inpt2d_step = inpt2d_rows * kernel_cols * kernel_dpts * in_channel * row_stride; TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE)); if (inpt2d == NULL) exit(1); TYPE *kern2d = (TYPE *) calloc(kernel_idrc * out_channel, sizeof(TYPE)); if (kern2d == NULL) exit(1); TYPE *output2d = (TYPE *) calloc(output_drcb * out_channel, sizeof(TYPE)); if (output2d == NULL) exit(1); memset(kernel_ptr, 0, kernel_idrc * out_channel * sizeof(TYPE)); INIT; int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2; int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2; int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2; if (pc < 0) pc = 0; if (pr < 0) pr = 0; if (pd < 0) pd = 0; int cnt; const int rstart = 0 - pr; const int rend = rstart + padded_input_rows; for (int i = 0; i < inpt2d_rows; ++i) { int bt = i / (output_cols * output_dpts); int cd = i % (output_cols * output_dpts); int ct = cd / output_dpts; int dt = cd % output_dpts; const int cstart = ct * col_stride - pc; const int dstart = dt * dpt_stride - pd; const int cend = cstart + kernel_cols; const int dend = dstart + kernel_dpts; const int input_idx_base = bt * input_crdi; cnt = 0; for (int r = rstart; r < rend; ++r) { for (int c = cstart; c < cend; ++c) { for (int d = dstart; d < dend; ++d) { for (int h = 0; h < in_channel; ++h) { if (c >= 0 && c < input_cols && r >= 0 && r < input_rows && d >= 0 && d < input_dpts) { int input_idx = input_idx_base + c * input_rdi + r * input_di + d * in_channel + h; inpt2d[cnt * inpt2d_rows + i] += input_ptr[input_idx]; } ++cnt; } } } } } cnt = 0; int oidx = 0; for (int r = 0; r < output_rows; ++r) { for (int o = 0; o < out_channel; ++o) { for (int b = 0; b < batches; ++b) { for (int c = 0; c < output_cols; ++c) { for (int d = 0; d < output_dpts; ++d) { oidx = b * output_crdo + c * output_rdo + r * output_dpts * out_channel + d * out_channel + o; output2d[cnt++] = output_ptr[oidx]; } } } } } for (int i = 0; i < output_rows; ++i) { GEMM(CblasColMajor, CblasTrans, CblasNoTrans, out_channel, kernel_idrc, inpt2d_rows, ALPHA, output2d + output_bcdo * i, inpt2d_rows, inpt2d + inpt2d_step * i, inpt2d_rows, ALPHA, kern2d, out_channel); } cnt = 0; int kidx = 0; for (int r = 0; r < kernel_rows; ++r) { for (int c = 0; c < kernel_cols; ++c) { for (int d = 0; d < kernel_dpts; ++d) { for (int i = 0; i < in_channel; ++i) { for (int o = 0; o < out_channel; ++o) { kidx = c * kernel_rdio + r * kernel_dio + d * kernel_io + i * out_channel + o; kernel_ptr[kidx] = kern2d[cnt++]; } } } } } free(inpt2d); free(kern2d); free(output2d); return Val_unit; } CAMLprim value FUN_BYTE (cuboid_backward_kernel_mec) (value * argv, int argn) { return FUN_NATIVE (cuboid_backward_kernel_mec) ( argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15], argv[16], argv[17] ); } CAMLprim value FUN_NATIVE (cuboid_backward_input_mec) ( value vInput, value vKernel, value vOutput, value vBatches, value vInput_cols, value vInput_rows, value vInput_dpts, value vIn_channel, value vKernel_cols, value vKernel_rows, value vKernel_dpts, value vOutput_cols, value vOutput_rows, value vOutput_dpts, value 
vOut_channel, value vDpt_stride, value vRow_stride, value vCol_stride ) { struct caml_ba_array *IN = Caml_ba_array_val(vInput); struct caml_ba_array *KE = Caml_ba_array_val(vKernel); struct caml_ba_array *OU = Caml_ba_array_val(vOutput); TYPE *input_ptr = (TYPE *) IN->data; TYPE *kernel_ptr = (TYPE *) KE->data; TYPE *output_ptr = (TYPE *) OU->data; int batches = Long_val(vBatches); int input_cols = Long_val(vInput_cols); int input_rows = Long_val(vInput_rows); int input_dpts = Long_val(vInput_dpts); int in_channel = Long_val(vIn_channel); int kernel_cols = Long_val(vKernel_cols); int kernel_rows = Long_val(vKernel_rows); int kernel_dpts = Long_val(vKernel_dpts); int output_cols = Long_val(vOutput_cols); int output_rows = Long_val(vOutput_rows); int output_dpts = Long_val(vOutput_dpts); int out_channel = Long_val(vOut_channel); int dpt_stride = Long_val(vDpt_stride); int row_stride = Long_val(vRow_stride); int col_stride = Long_val(vCol_stride); const int input_crdi = in_channel * input_dpts * input_rows * input_cols; const int input_rdi = in_channel * input_dpts * input_rows; const int input_di = in_channel * input_dpts; const int output_crdo = out_channel * output_dpts * output_rows * output_cols; const int output_rdo = out_channel * output_dpts * output_rows; const int output_dr = output_dpts * output_rows; const int output_drc = output_dpts * output_rows * output_cols; const int output_drcb = output_dpts * output_rows * output_cols * batches; const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols; const int kernel_rdio = kernel_rows * kernel_dpts * in_channel * out_channel; const int kernel_dio = kernel_dpts * in_channel * out_channel; const int kernel_io = in_channel * out_channel; const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride; const int output_bcdo = out_channel * output_cols * output_dpts * batches; const int inpt2d_cols = padded_input_rows * kernel_cols * kernel_dpts * in_channel; const int inpt2d_rows = batches * output_cols * output_dpts; const int inpt2d_step = inpt2d_rows * kernel_cols * kernel_dpts * in_channel * row_stride; TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE)); if (inpt2d == NULL) exit(1); TYPE *kern2d = (TYPE *) calloc(kernel_idrc * out_channel, sizeof(TYPE)); if (kern2d == NULL) exit(1); TYPE *output2d = (TYPE *) calloc(output_drcb * out_channel, sizeof(TYPE)); if (output2d == NULL) exit(1); memset(input_ptr, 0, batches * input_crdi * sizeof(TYPE)); INIT; int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2; int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2; int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2; if (pc < 0) pc = 0; if (pr < 0) pr = 0; if (pd < 0) pd = 0; int cnt = 0; int oidx = 0; for (int r = 0; r < output_rows; ++r) { for (int o = 0; o < out_channel; ++o) { for (int b = 0; b < batches; ++b) { for (int c = 0; c < output_cols; ++c) { for (int d = 0; d < output_dpts; ++d) { oidx = b * output_crdo + c * output_rdo + r * output_dpts * out_channel + d * out_channel + o; output2d[cnt++] = output_ptr[oidx]; } } } } } cnt = 0; int kidx = 0; for (int o = 0; o < out_channel; ++o) { for (int r = 0; r < kernel_rows; ++r) { for (int c = 0; c < kernel_cols; ++c) { for (int d = 0; d < kernel_dpts; ++d) { for (int i = 0; i < in_channel; ++i) { kidx = c * kernel_rdio + r * kernel_dio + d * kernel_io + i * out_channel + o; kern2d[cnt++] = kernel_ptr[kidx]; } } } } } for (int i = 0; i < output_rows; ++i) { GEMM(CblasColMajor, 
CblasNoTrans, CblasTrans, inpt2d_rows, kernel_idrc, out_channel, ALPHA, output2d + output_bcdo * i, inpt2d_rows, kern2d, kernel_idrc, ALPHA, inpt2d + inpt2d_step * i, inpt2d_rows); } const int rstart = 0 - pr; const int rend = rstart + padded_input_rows; for (int i = 0; i < inpt2d_rows; ++i) { int bt = i / (output_cols * output_dpts); int cd = i % (output_cols * output_dpts); int ct = cd / output_dpts; int dt = cd % output_dpts; const int cstart = ct * col_stride - pc; const int dstart = dt * dpt_stride - pd; const int cend = cstart + kernel_cols; const int dend = dstart + kernel_dpts; const int input_idx_base = bt * input_crdi; int cnt = 0; for (int r = rstart; r < rend; ++r) { for (int c = cstart; c < cend; ++c) { for (int d = dstart; d < dend; ++d) { for (int h = 0; h < in_channel; ++h) { if (c >= 0 && c < input_cols && r >= 0 && r < input_rows && d >= 0 && d < input_dpts) { int input_idx = input_idx_base + c * input_rdi + r * input_di + d * in_channel + h; input_ptr[input_idx] += inpt2d[cnt * inpt2d_rows + i]; } ++cnt; } } } } } free(inpt2d); free(kern2d); free(output2d); return Val_unit; } CAMLprim value FUN_BYTE (cuboid_backward_input_mec) (value * argv, int argn) { return FUN_NATIVE (cuboid_backward_input_mec) ( argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15], argv[16], argv[17] ); } /* * naive implementation */ CAMLprim value FUN_NATIVE (spatial_naive) ( value vInput_ptr, value vKernel_ptr, value vOutput_ptr, value vBatches, value vInput_cols, value vInput_rows, value vIn_channel, value vKernel_cols, value vKernel_rows, value vOutput_cols, value vOutput_rows, value vOut_channel, value vRow_stride, value vCol_stride, value vPadding, value vRow_in_stride, value vCol_in_stride ) { struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr); struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr); struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr); TYPE *input_ptr = (TYPE *) IN->data; TYPE *kernel_ptr = (TYPE *) KE->data; TYPE *output_ptr = (TYPE *) OU->data; int batches = Long_val(vBatches); int input_cols = Long_val(vInput_cols); int input_rows = Long_val(vInput_rows); int in_channel = Long_val(vIn_channel); int kernel_cols = Long_val(vKernel_cols); int kernel_rows = Long_val(vKernel_rows); int output_cols = Long_val(vOutput_cols); int output_rows = Long_val(vOutput_rows); int out_channel = Long_val(vOut_channel); int row_stride = Long_val(vRow_stride); int col_stride = Long_val(vCol_stride); int padding = Long_val(vPadding); int row_in_stride = Long_val(vRow_in_stride); int col_in_stride = Long_val(vCol_in_stride); const int input_cri = in_channel * input_rows * input_cols; const int input_ri = in_channel * input_rows; const int output_cri = out_channel * output_rows * output_cols; const int output_cr = output_rows * output_cols; const int output_ri = out_channel * output_rows; const int output_crb = output_rows * output_cols * batches; const int kernel_cri = kernel_cols * kernel_rows * in_channel; const int kernel_rio = out_channel * in_channel * kernel_rows; const int kernel_io = out_channel * in_channel; const int ksize = kernel_cols * kernel_rows; memset(output_ptr, 0, batches * output_cri * sizeof(TYPE)); INIT; int pr = 0, pc = 0; if (padding != 1) { pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2; pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2; if (pr < 0) pr = 0; if (pc < 0) pc = 0; } for (int i = 0; i < batches; ++i) { const 
int input_idx_base = i * input_cri; for (int j = 0; j < output_cols; ++j) { for (int k = 0; k < output_rows; ++k) { const int output_idx_base = i * output_cri + j * output_ri + k * out_channel; const int cstart = j * col_stride - pc; const int rstart = k * row_stride - pr; const int cend = cstart + kernel_cols; const int rend = rstart + kernel_rows; for (int l = 0; l < out_channel; ++l) { TYPE sum = 0.; for (int h = 0; h < in_channel; ++h) { TYPE input_val, kernel_val; for (int a = cstart; a < cend; ++a) { for (int b = rstart; b < rend; ++b) { if (a >= 0 && a < input_cols && b >= 0 && b < input_rows) { int input_idx = input_idx_base + a * input_ri + b * in_channel + h; input_val = *(input_ptr + input_idx); } else { input_val = 0.; } int kernel_index = (a - cstart) * kernel_rio + (b - rstart) * kernel_io + h * out_channel + l; kernel_val = *(kernel_ptr + kernel_index); sum += input_val * kernel_val; } } } int output_idx = output_idx_base + l; *(output_ptr + output_idx) = sum; } } } } return Val_unit; } CAMLprim value FUN_BYTE (spatial_naive) (value * argv, int argn) { return FUN_NATIVE (spatial_naive) ( argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15], argv[16] ); } CAMLprim value FUN_NATIVE (spatial_backward_kernel_naive) ( value vInput_ptr, value vKernel_ptr, value vOutput_ptr, value vBatches, value vInput_cols, value vInput_rows, value vIn_channel, value vKernel_cols, value vKernel_rows, value vOutput_cols, value vOutput_rows, value vOut_channel, value vRow_stride, value vCol_stride, value vRow_in_stride, value vCol_in_stride ) { struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr); struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr); struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr); TYPE *input_ptr = (TYPE *) IN->data; TYPE *kernel_ptr = (TYPE *) KE->data; TYPE *output_ptr = (TYPE *) OU->data; int batches = Long_val(vBatches); int input_cols = Long_val(vInput_cols); int input_rows = Long_val(vInput_rows); int in_channel = Long_val(vIn_channel); int kernel_cols = Long_val(vKernel_cols); int kernel_rows = Long_val(vKernel_rows); int output_cols = Long_val(vOutput_cols); int output_rows = Long_val(vOutput_rows); int out_channel = Long_val(vOut_channel); int row_stride = Long_val(vRow_stride); int col_stride = Long_val(vCol_stride); int row_in_stride = Long_val(vRow_in_stride); int col_in_stride = Long_val(vCol_in_stride); const int input_cri = in_channel * input_rows * input_cols; const int input_ri = in_channel * input_rows; const int kernel_rio = out_channel * in_channel * kernel_rows; const int kernel_io = out_channel * in_channel; const int output_cri = out_channel * output_rows * output_cols; const int output_ri = out_channel * output_rows; memset(kernel_ptr, 0, kernel_cols * kernel_rio * sizeof(TYPE)); INIT; int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2; int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2; if (pr < 0) pr = 0; if (pc < 0) pc = 0; for (int i = 0; i < batches; ++i) { for (int j = 0; j < output_cols; ++j) { for (int k = 0; k < output_rows; ++k) { const int cstart = j * col_stride - pc; const int rstart = k * row_stride - pr; const int cend = cstart + kernel_cols; const int rend = rstart + kernel_rows; for (int l = 0; l < out_channel; ++l) { int output_idx = i * output_cri + j * output_ri + k * out_channel + l; TYPE output_val = *(output_ptr + output_idx); for (int h = 0; h < in_channel; ++h) { TYPE 
input_val = 0.; for (int a = cstart; a < cend; ++a) { for (int b = rstart; b < rend; ++b) { if (a >= 0 && a < input_cols && b >= 0 && b < input_rows) { int input_idx = i * input_cri + a * input_ri + b * in_channel + h; input_val = *(input_ptr + input_idx); } else { input_val = 0.; } int kernel_index = (a - cstart) * kernel_rio + (b - rstart) * kernel_io + h * out_channel + l; *(kernel_ptr + kernel_index) += output_val * input_val; } } } } } } } return Val_unit; } CAMLprim value FUN_BYTE (spatial_backward_kernel_naive) (value * argv, int argn) { return FUN_NATIVE (spatial_backward_kernel_naive) ( argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15] ); } CAMLprim value FUN_NATIVE (spatial_backward_input_naive) ( value vInput_ptr, value vKernel_ptr, value vOutput_ptr, value vBatches, value vInput_cols, value vInput_rows, value vIn_channel, value vKernel_cols, value vKernel_rows, value vOutput_cols, value vOutput_rows, value vOut_channel, value vRow_stride, value vCol_stride, value vRow_in_stride, value vCol_in_stride ) { struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr); struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr); struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr); TYPE *input_ptr = (TYPE *) IN->data; TYPE *kernel_ptr = (TYPE *) KE->data; TYPE *output_ptr = (TYPE *) OU->data; int batches = Long_val(vBatches); int input_cols = Long_val(vInput_cols); int input_rows = Long_val(vInput_rows); int in_channel = Long_val(vIn_channel); int kernel_cols = Long_val(vKernel_cols); int kernel_rows = Long_val(vKernel_rows); int output_cols = Long_val(vOutput_cols); int output_rows = Long_val(vOutput_rows); int out_channel = Long_val(vOut_channel); int row_stride = Long_val(vRow_stride); int col_stride = Long_val(vCol_stride); int row_in_stride = Long_val(vRow_in_stride); int col_in_stride = Long_val(vCol_in_stride); const int input_cri = in_channel * input_rows * input_cols; const int input_ri = in_channel * input_rows; const int kernel_rio = out_channel * in_channel * kernel_rows; const int kernel_io = out_channel * in_channel; const int output_cri = out_channel * output_rows * output_cols; const int output_ri = out_channel * output_rows; memset(input_ptr, 0, batches * input_cri * sizeof(TYPE)); INIT; int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2; int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2; if (pr < 0) pr = 0; if (pc < 0) pc = 0; for (int i = 0; i < batches; ++i) { for (int j = 0; j < output_cols; ++j) { for (int k = 0; k < output_rows; ++k) { const int cstart = j * col_stride - pc; const int rstart = k * row_stride - pr; const int cend = cstart + kernel_cols; const int rend = rstart + kernel_rows; for (int l = 0; l < out_channel; ++l) { int output_idx = i * output_cri + j * output_ri + k * out_channel + l; TYPE output_val = *(output_ptr + output_idx); for (int h = 0; h < in_channel; ++h) { TYPE kernel_val = 0.; for (int a = cstart; a < cend; ++a) { for (int b = rstart; b < rend; ++b) { int kernel_index = (a - cstart) * kernel_rio + (b - rstart) * kernel_io + h * out_channel + l; kernel_val = *(kernel_ptr + kernel_index); if (a >= 0 && a < input_cols && b >= 0 && b < input_rows) { int input_idx = i * input_cri + a * input_ri + b * in_channel + h; *(input_ptr + input_idx) += output_val * kernel_val; } } } } } } } } return Val_unit; } CAMLprim value FUN_BYTE (spatial_backward_input_naive) (value * argv, int argn) { return 
FUN_NATIVE (spatial_backward_input_naive) ( argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15] ); } CAMLprim value FUN_NATIVE (cuboid_naive) ( value vInput, value vKernel, value vOutput, value vBatches, value vInput_cols, value vInput_rows, value vInput_dpts, value vIn_channel, value vKernel_cols, value vKernel_rows, value vKernel_dpts, value vOutput_cols, value vOutput_rows, value vOutput_dpts, value vOut_channel, value vDpt_stride, value vRow_stride, value vCol_stride, value vPadding ) { struct caml_ba_array *IN = Caml_ba_array_val(vInput); struct caml_ba_array *KE = Caml_ba_array_val(vKernel); struct caml_ba_array *OU = Caml_ba_array_val(vOutput); TYPE *input_ptr = (TYPE *) IN->data; TYPE *kernel_ptr = (TYPE *) KE->data; TYPE *output_ptr = (TYPE *) OU->data; int batches = Long_val(vBatches); int input_cols = Long_val(vInput_cols); int input_rows = Long_val(vInput_rows); int input_dpts = Long_val(vInput_dpts); int in_channel = Long_val(vIn_channel); int kernel_cols = Long_val(vKernel_cols); int kernel_rows = Long_val(vKernel_rows); int kernel_dpts = Long_val(vKernel_dpts); int output_cols = Long_val(vOutput_cols); int output_rows = Long_val(vOutput_rows); int output_dpts = Long_val(vOutput_dpts); int out_channel = Long_val(vOut_channel); int dpt_stride = Long_val(vDpt_stride); int row_stride = Long_val(vRow_stride); int col_stride = Long_val(vCol_stride); int padding = Long_val(vPadding); const int input_crdi = in_channel * input_dpts * input_rows * input_cols; const int input_rdi = in_channel * input_dpts * input_rows; const int input_di = in_channel * input_dpts; const int kernel_rdio = out_channel * in_channel * kernel_dpts * kernel_rows; const int kernel_dio = out_channel * in_channel * kernel_dpts; const int kernel_io = out_channel * in_channel; const int output_crdo = out_channel * output_dpts * output_rows * output_cols; const int output_rdo = out_channel * output_dpts * output_rows; const int output_do = out_channel * output_dpts; INIT; int pd = 0, pr = 0, pc = 0; if (padding != 1) { pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2; pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2; pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2; if (pc < 0) pc = 0; if (pr < 0) pr = 0; if (pd < 0) pd = 0; } for (int i = 0; i < batches; ++i) { const int input_idx_base = i * input_crdi; for (int j = 0; j < output_cols; ++j) { for (int k = 0; k < output_rows; ++k) { for (int d = 0; d < output_dpts; ++d) { const int output_idx_base = i * output_crdo + j * output_rdo + k * output_do + d * out_channel; const int cstart = j * col_stride - pc; const int rstart = k * row_stride - pr; const int dstart = d * dpt_stride - pd; const int cend = cstart + kernel_cols; const int rend = rstart + kernel_rows; const int dend = dstart + kernel_dpts; for (int l = 0; l < out_channel; ++l) { TYPE sum = 0.; int output_idx = output_idx_base + l; for (int h = 0; h < in_channel; ++h) { for (int a = cstart; a < cend; ++a) { for (int b = rstart; b < rend; ++b) { for (int c = dstart; c < dend; ++c) { TYPE input_val, kernel_val; if (a >= 0 && a < input_cols && b >= 0 && b < input_rows && c >= 0 && c < input_dpts) { int input_idx = input_idx_base + a * input_rdi + b * input_di + c * in_channel + h; input_val = *(input_ptr + input_idx); } else { input_val = 0.; } int kernel_index = (a - cstart) * kernel_rdio + (b - rstart) * kernel_dio + (c - dstart) * kernel_io + h * 
out_channel + l; kernel_val = *(kernel_ptr + kernel_index); sum += input_val * kernel_val; } } } } *(output_ptr + output_idx) = sum; } } } } } return Val_unit; } CAMLprim value FUN_BYTE (cuboid_naive) (value * argv, int argn) { return FUN_NATIVE (cuboid_naive) ( argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15], argv[16], argv[17], argv[18] ); } CAMLprim value FUN_NATIVE (cuboid_backward_kernel_naive) ( value vInput, value vKernel, value vOutput, value vBatches, value vInput_cols, value vInput_rows, value vInput_dpts, value vIn_channel, value vKernel_cols, value vKernel_rows, value vKernel_dpts, value vOutput_cols, value vOutput_rows, value vOutput_dpts, value vOut_channel, value vDpt_stride, value vRow_stride, value vCol_stride ) { struct caml_ba_array *IN = Caml_ba_array_val(vInput); struct caml_ba_array *KE = Caml_ba_array_val(vKernel); struct caml_ba_array *OU = Caml_ba_array_val(vOutput); TYPE *input_ptr = (TYPE *) IN->data; TYPE *kernel_ptr = (TYPE *) KE->data; TYPE *output_ptr = (TYPE *) OU->data; int batches = Long_val(vBatches); int input_cols = Long_val(vInput_cols); int input_rows = Long_val(vInput_rows); int input_dpts = Long_val(vInput_dpts); int in_channel = Long_val(vIn_channel); int kernel_cols = Long_val(vKernel_cols); int kernel_rows = Long_val(vKernel_rows); int kernel_dpts = Long_val(vKernel_dpts); int output_cols = Long_val(vOutput_cols); int output_rows = Long_val(vOutput_rows); int output_dpts = Long_val(vOutput_dpts); int out_channel = Long_val(vOut_channel); int dpt_stride = Long_val(vDpt_stride); int row_stride = Long_val(vRow_stride); int col_stride = Long_val(vCol_stride); const int input_crdi = in_channel * input_dpts * input_rows * input_cols; const int input_rdi = in_channel * input_dpts * input_rows; const int input_di = in_channel * input_dpts; const int kernel_rdio = out_channel * in_channel * kernel_dpts * kernel_rows; const int kernel_dio = out_channel * in_channel * kernel_dpts; const int kernel_io = out_channel * in_channel; const int output_crdo = out_channel * output_dpts * output_rows * output_cols; const int output_rdo = out_channel * output_dpts * output_rows; const int output_do = out_channel * output_dpts; memset(kernel_ptr, 0, kernel_cols * kernel_rdio * sizeof(TYPE)); INIT; int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2; int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2; int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2; if (pc < 0) pc = 0; if (pr < 0) pr = 0; if (pd < 0) pd = 0; for (int i = 0; i < batches; ++i) { const int input_idx_base = i * input_crdi; for (int j = 0; j < output_cols; ++j) { for (int k = 0; k < output_rows; ++k) { for (int d = 0; d < output_dpts; ++d) { const int output_idx_base = i * output_crdo + j * output_rdo + k * output_do + d * out_channel; const int cstart = j * col_stride - pc; const int rstart = k * row_stride - pr; const int dstart = d * dpt_stride - pd; const int cend = cstart + kernel_cols; const int rend = rstart + kernel_rows; const int dend = dstart + kernel_dpts; for (int l = 0; l < out_channel; ++l) { int output_idx = output_idx_base + l; TYPE output_val = *(output_ptr + output_idx); for (int h = 0; h < in_channel; ++h) { for (int a = cstart; a < cend; ++a) { for (int b = rstart; b < rend; ++b) { for (int c = dstart; c < dend; ++c) { TYPE input_val = 0.; if (a >= 0 && a < input_cols && b >= 0 && b < input_rows && c >= 0 && c < 
input_dpts) { int input_idx = input_idx_base + a * input_rdi + b * input_di + c * in_channel + h; input_val = *(input_ptr + input_idx); } int kernel_index = (a - cstart) * kernel_rdio + (b - rstart) * kernel_dio + (c - dstart) * kernel_io + h * out_channel + l; *(kernel_ptr + kernel_index) += output_val * input_val; } } } } } } } } } return Val_unit; } CAMLprim value FUN_BYTE (cuboid_backward_kernel_naive) (value * argv, int argn) { return FUN_NATIVE (cuboid_backward_kernel_naive) ( argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15], argv[16], argv[17] ); } CAMLprim value FUN_NATIVE (cuboid_backward_input_naive) ( value vInput, value vKernel, value vOutput, value vBatches, value vInput_cols, value vInput_rows, value vInput_dpts, value vIn_channel, value vKernel_cols, value vKernel_rows, value vKernel_dpts, value vOutput_cols, value vOutput_rows, value vOutput_dpts, value vOut_channel, value vDpt_stride, value vRow_stride, value vCol_stride ) { struct caml_ba_array *IN = Caml_ba_array_val(vInput); struct caml_ba_array *KE = Caml_ba_array_val(vKernel); struct caml_ba_array *OU = Caml_ba_array_val(vOutput); TYPE *input_ptr = (TYPE *) IN->data; TYPE *kernel_ptr = (TYPE *) KE->data; TYPE *output_ptr = (TYPE *) OU->data; int batches = Long_val(vBatches); int input_cols = Long_val(vInput_cols); int input_rows = Long_val(vInput_rows); int input_dpts = Long_val(vInput_dpts); int in_channel = Long_val(vIn_channel); int kernel_cols = Long_val(vKernel_cols); int kernel_rows = Long_val(vKernel_rows); int kernel_dpts = Long_val(vKernel_dpts); int output_cols = Long_val(vOutput_cols); int output_rows = Long_val(vOutput_rows); int output_dpts = Long_val(vOutput_dpts); int out_channel = Long_val(vOut_channel); int dpt_stride = Long_val(vDpt_stride); int row_stride = Long_val(vRow_stride); int col_stride = Long_val(vCol_stride); const int input_crdi = in_channel * input_dpts * input_rows * input_cols; const int input_rdi = in_channel * input_dpts * input_rows; const int input_di = in_channel * input_dpts; const int kernel_rdio = out_channel * in_channel * kernel_dpts * kernel_rows; const int kernel_dio = out_channel * in_channel * kernel_dpts; const int kernel_io = out_channel * in_channel; const int output_crdo = out_channel * output_dpts * output_rows * output_cols; const int output_rdo = out_channel * output_dpts * output_rows; const int output_do = out_channel * output_dpts; memset(input_ptr, 0, batches * input_crdi * sizeof(TYPE)); INIT; int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2; int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2; int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2; if (pc < 0) pc = 0; if (pr < 0) pr = 0; if (pd < 0) pd = 0; for (int i = 0; i < batches; ++i) { const int input_idx_base = i * input_crdi; for (int j = 0; j < output_cols; ++j) { for (int k = 0; k < output_rows; ++k) { for (int d = 0; d < output_dpts; ++d) { const int output_idx_base = i * output_crdo + j * output_rdo + k * output_do + d * out_channel; const int cstart = j * col_stride - pc; const int rstart = k * row_stride - pr; const int dstart = d * dpt_stride - pd; const int cend = cstart + kernel_cols; const int rend = rstart + kernel_rows; const int dend = dstart + kernel_dpts; for (int l = 0; l < out_channel; ++l) { int output_idx = output_idx_base + l; TYPE output_val = *(output_ptr + output_idx); for (int h = 0; h < in_channel; ++h) { TYPE 
kernel_val; for (int a = cstart; a < cend; ++a) { for (int b = rstart; b < rend; ++b) { for (int c = dstart; c < dend; ++c) { int kernel_index = (a - cstart) * kernel_rdio + (b - rstart) * kernel_dio + (c - dstart) * kernel_io + h * out_channel + l; kernel_val = *(kernel_ptr + kernel_index); if (a >= 0 && a < input_cols && b >= 0 && b < input_rows && c >= 0 && c < input_dpts) { int input_idx = input_idx_base + a * input_rdi + b * input_di + c * in_channel + h; *(input_ptr + input_idx) += output_val * kernel_val; } } } } } } } } } } return Val_unit; } CAMLprim value FUN_BYTE (cuboid_backward_input_naive) (value * argv, int argn) { return FUN_NATIVE (cuboid_backward_input_naive) ( argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15], argv[16], argv[17] ); } /* * dilated convolution */ CAMLprim value FUN_NATIVE (dilated_spatial_im2col) ( value vInput_ptr, value vKernel_ptr, value vOutput_ptr, value vBatches, value vInput_cols, value vInput_rows, value vIn_channel, value vKernel_cols, value vKernel_rows, value vOutput_cols, value vOutput_rows, value vOut_channel, value vRow_stride, value vCol_stride, value vPadding, value vRow_in_stride, value vCol_in_stride ) { struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr); struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr); struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr); TYPE *input_ptr = (TYPE *) IN->data; TYPE *kernel_ptr = (TYPE *) KE->data; TYPE *output_ptr = (TYPE *) OU->data; int batches = Long_val(vBatches); int input_cols = Long_val(vInput_cols); int input_rows = Long_val(vInput_rows); int in_channel = Long_val(vIn_channel); int kernel_cols = Long_val(vKernel_cols); int kernel_rows = Long_val(vKernel_rows); int output_cols = Long_val(vOutput_cols); int output_rows = Long_val(vOutput_rows); int out_channel = Long_val(vOut_channel); int row_stride = Long_val(vRow_stride); int col_stride = Long_val(vCol_stride); int padding = Long_val(vPadding); int row_in_stride = Long_val(vRow_in_stride); int col_in_stride = Long_val(vCol_in_stride); const int input_cri = in_channel * input_rows * input_cols; const int input_ri = in_channel * input_rows; const int output_cri = out_channel * output_rows * output_cols; const int output_cr = output_rows * output_cols; const int output_crb = output_rows * output_cols * batches; const int kernel_cri = kernel_cols * kernel_rows * in_channel; INIT; TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE)); if (inpt2d == NULL) exit(1); memset(output_ptr, 0, batches * output_cri * sizeof(TYPE)); int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1); int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1); int pr = 0, pc = 0; if (padding != 1) { pr = (row_stride * ( output_rows - 1) + kernel_rows_up - input_rows) / 2; pc = (col_stride * ( output_cols - 1) + kernel_cols_up - input_cols) / 2; if (pr < 0) pr = 0; if (pc < 0) pc = 0; } #ifdef _OPENMP #pragma omp parallel for schedule(static) #endif /* _OPENMP */ for (int i = 0; i < output_crb; ++i) { int bt = i / output_cr; int cr = i % output_cr; int c = cr / output_rows; int r = cr % output_rows; const int cstart = c * col_stride - pc; const int rstart = r * row_stride - pr; const int cend = cstart + kernel_cols_up; const int rend = rstart + kernel_rows_up; const int input_idx_base = bt * input_cri; int cnt = 0; for (int a = cstart; a < cend; a += col_in_stride) { for (int b = rstart; b < rend; b += 
row_in_stride) { for (int h = 0; h < in_channel; ++h) { if (a < input_cols && a >= 0 && b < input_rows && b >= 0) { int input_idx = input_idx_base + a * input_ri + b * in_channel + h; inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx]; } ++cnt; } } } } GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans, output_crb, out_channel, kernel_cri, ALPHA, inpt2d, kernel_cri, kernel_ptr, out_channel, BETA, output_ptr, out_channel); free(inpt2d); return Val_unit; } CAMLprim value FUN_BYTE (dilated_spatial_im2col) (value * argv, int argn) { return FUN_NATIVE (dilated_spatial_im2col) ( argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15], argv[16] ); } CAMLprim value FUN_NATIVE (dilated_spatial_backward_kernel_im2col) ( value vInput_ptr, value vKernel_ptr, value vOutput_ptr, value vBatches, value vInput_cols, value vInput_rows, value vIn_channel, value vKernel_cols, value vKernel_rows, value vOutput_cols, value vOutput_rows, value vOut_channel, value vRow_stride, value vCol_stride, value vRow_in_stride, value vCol_in_stride ) { struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr); struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr); struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr); TYPE *input_ptr = (TYPE *) IN->data; TYPE *kernel_ptr = (TYPE *) KE->data; TYPE *output_ptr = (TYPE *) OU->data; int batches = Long_val(vBatches); int input_cols = Long_val(vInput_cols); int input_rows = Long_val(vInput_rows); int in_channel = Long_val(vIn_channel); int kernel_cols = Long_val(vKernel_cols); int kernel_rows = Long_val(vKernel_rows); int output_cols = Long_val(vOutput_cols); int output_rows = Long_val(vOutput_rows); int out_channel = Long_val(vOut_channel); int row_stride = Long_val(vRow_stride); int col_stride = Long_val(vCol_stride); int row_in_stride = Long_val(vRow_in_stride); int col_in_stride = Long_val(vCol_in_stride); const int input_cri = in_channel * input_rows * input_cols; const int input_ri = in_channel * input_rows; const int kernel_rio = out_channel * in_channel * kernel_rows; const int output_ri = out_channel * output_rows; const int output_cr = output_rows * output_cols; const int output_crb = output_rows * output_cols * batches; const int kernel_cri = kernel_cols * kernel_rows * in_channel; INIT; TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE)); if (inpt2d == NULL) exit(1); TYPE *kern2d = (TYPE *) calloc(kernel_cri * out_channel, sizeof(TYPE)); if (kern2d == NULL) exit(1); memset(kernel_ptr, 0, kernel_cols * kernel_rio * sizeof(TYPE)); int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1); int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1); int pad_rows = row_stride * (output_rows - 1) + kernel_rows_up - input_rows; int pad_cols = col_stride * (output_cols - 1) + kernel_cols_up - input_cols; int p_top = pad_rows / 2; int p_left = pad_cols / 2; if (p_top < 0) p_top = 0; if (p_left < 0) p_left = 0; #ifdef _OPENMP #pragma omp parallel for schedule(static) #endif /* _OPENMP */ for (int i = 0; i < output_crb; ++i) { int bt = i / output_cr; int cr = i % output_cr; int c = cr / output_rows; int r = cr % output_rows; const int cstart = c * col_stride - p_left; const int rstart = r * row_stride - p_top; const int cend = cstart + kernel_cols_up; const int rend = rstart + kernel_rows_up; const int input_idx_base = bt * input_cri; int cnt = 0; for (int a = cstart; a < cend; a += col_in_stride) { for (int b = rstart; b < rend; b 
+= row_in_stride) { for (int h = 0; h < in_channel; ++h) { if (a < input_cols && a >= 0 && b < input_rows && b >= 0) { int input_idx = input_idx_base + a * input_ri + b * in_channel + h; inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx]; } ++cnt; } } } } GEMM(CblasRowMajor, CblasTrans, CblasNoTrans, out_channel, kernel_cri, output_crb, ALPHA, output_ptr, out_channel, inpt2d, kernel_cri, BETA, kern2d, kernel_cri); int cnt = 0; for (int j = 0; j < kernel_cri; ++j) { for (int i = 0; i < out_channel; ++i) { kernel_ptr[cnt++] = kern2d[i * kernel_cri + j]; } } free(inpt2d); free(kern2d); return Val_unit; } CAMLprim value FUN_BYTE (dilated_spatial_backward_kernel_im2col) (value * argv, int argn) { return FUN_NATIVE (dilated_spatial_backward_kernel_im2col) ( argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15] ); } CAMLprim value FUN_NATIVE (dilated_spatial_backward_input_im2col) ( value vInput_ptr, value vKernel_ptr, value vOutput_ptr, value vBatches, value vInput_cols, value vInput_rows, value vIn_channel, value vKernel_cols, value vKernel_rows, value vOutput_cols, value vOutput_rows, value vOut_channel, value vRow_stride, value vCol_stride, value vRow_in_stride, value vCol_in_stride ) { struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr); struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr); struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr); TYPE *input_ptr = (TYPE *) IN->data; TYPE *kernel_ptr = (TYPE *) KE->data; TYPE *output_ptr = (TYPE *) OU->data; int batches = Long_val(vBatches); int input_cols = Long_val(vInput_cols); int input_rows = Long_val(vInput_rows); int in_channel = Long_val(vIn_channel); int kernel_cols = Long_val(vKernel_cols); int kernel_rows = Long_val(vKernel_rows); int output_cols = Long_val(vOutput_cols); int output_rows = Long_val(vOutput_rows); int out_channel = Long_val(vOut_channel); int row_stride = Long_val(vRow_stride); int col_stride = Long_val(vCol_stride); int row_in_stride = Long_val(vRow_in_stride); int col_in_stride = Long_val(vCol_in_stride); const int input_cri = in_channel * input_rows * input_cols; const int input_ri = in_channel * input_rows; const int output_ri = out_channel * output_rows; const int output_cr = output_rows * output_cols; const int output_crb = output_rows * output_cols * batches; const int kernel_cri = kernel_cols * kernel_rows * in_channel; INIT; TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE)); if (inpt2d == NULL) exit(1); memset(input_ptr, 0, batches * input_cri * sizeof(TYPE)); int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1); int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1); int pad_rows = row_stride * (output_rows - 1) + kernel_rows_up - input_rows; int pad_cols = col_stride * (output_cols - 1) + kernel_cols_up - input_cols; int p_top = pad_rows / 2; int p_left = pad_cols / 2; if (p_top < 0) p_top = 0; if (p_left < 0) p_left = 0; GEMM(CblasRowMajor, CblasNoTrans, CblasTrans, output_crb, kernel_cri, out_channel, ALPHA, output_ptr, out_channel, kernel_ptr, out_channel, BETA, inpt2d, kernel_cri); for (int i = 0; i < output_crb; ++i) { int bt = i / output_cr; int cr = i % output_cr; int c = cr / output_rows; int r = cr % output_rows; const int cstart = c * col_stride - p_left; const int rstart = r * row_stride - p_top; const int cend = cstart + kernel_cols_up; const int rend = rstart + kernel_rows_up; const int input_idx_base = bt * input_cri; int 
cnt = 0; for (int a = cstart; a < cend; a += col_in_stride) { for (int b = rstart; b < rend; b += row_in_stride) { for (int h = 0; h < in_channel; ++h) { if (a < input_cols && a >= 0 && b < input_rows && b >= 0) { int input_idx = input_idx_base + a * input_ri + b * in_channel + h; input_ptr[input_idx] += inpt2d[i * kernel_cri + cnt]; } ++cnt; } } } } free(inpt2d); return Val_unit; } CAMLprim value FUN_BYTE (dilated_spatial_backward_input_im2col) (value * argv, int argn) { return FUN_NATIVE (dilated_spatial_backward_input_im2col) ( argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15] ); } CAMLprim value FUN_NATIVE (dilated_cuboid_im2col) ( value vInput, value vKernel, value vOutput, value vBatches, value vInput_cols, value vInput_rows, value vInput_dpts, value vIn_channel, value vKernel_cols, value vKernel_rows, value vKernel_dpts, value vOutput_cols, value vOutput_rows, value vOutput_dpts, value vOut_channel, value vDpt_stride, value vRow_stride, value vCol_stride, value vDpt_in_stride, value vRow_in_stride, value vCol_in_stride, value vPadding ) { struct caml_ba_array *IN = Caml_ba_array_val(vInput); struct caml_ba_array *KE = Caml_ba_array_val(vKernel); struct caml_ba_array *OU = Caml_ba_array_val(vOutput); TYPE *input_ptr = (TYPE *) IN->data; TYPE *kernel_ptr = (TYPE *) KE->data; TYPE *output_ptr = (TYPE *) OU->data; int batches = Long_val(vBatches); int input_cols = Long_val(vInput_cols); int input_rows = Long_val(vInput_rows); int input_dpts = Long_val(vInput_dpts); int in_channel = Long_val(vIn_channel); int kernel_cols = Long_val(vKernel_cols); int kernel_rows = Long_val(vKernel_rows); int kernel_dpts = Long_val(vKernel_dpts); int output_cols = Long_val(vOutput_cols); int output_rows = Long_val(vOutput_rows); int output_dpts = Long_val(vOutput_dpts); int out_channel = Long_val(vOut_channel); int dpt_stride = Long_val(vDpt_stride); int row_stride = Long_val(vRow_stride); int col_stride = Long_val(vCol_stride); int dpt_in_stride = Long_val(vDpt_in_stride); int row_in_stride = Long_val(vRow_in_stride); int col_in_stride = Long_val(vCol_in_stride); int padding = Long_val(vPadding); const int input_crdi = in_channel * input_dpts * input_rows * input_cols; const int input_rdi = in_channel * input_dpts * input_rows; const int input_di = in_channel * input_dpts; const int output_crdo = out_channel * output_dpts * output_rows * output_cols; const int output_dr = output_dpts * output_rows; const int output_drc = output_dpts * output_rows * output_cols; const int output_drcb = output_dpts * output_rows * output_cols * batches; const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols; TYPE *inpt2d = (TYPE *) calloc(kernel_idrc * output_drcb, sizeof(TYPE)); if (inpt2d == NULL) exit(1); memset(output_ptr, 0, batches * output_crdo * sizeof(TYPE)); INIT; int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1); int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1); int kernel_dpts_up = kernel_dpts + (kernel_dpts - 1) * (dpt_in_stride - 1); int pd = 0, pr = 0, pc = 0; if (padding != 1) { pc = (col_stride * (output_cols - 1) + kernel_cols_up - input_cols) / 2; pr = (row_stride * (output_rows - 1) + kernel_rows_up - input_rows) / 2; pd = (dpt_stride * (output_dpts - 1) + kernel_dpts_up - input_dpts) / 2; if (pc < 0) pc = 0; if (pr < 0) pr = 0; if (pd < 0) pd = 0; } #ifdef _OPENMP #pragma omp parallel for schedule(static) #endif /* _OPENMP */ for 
(int i = 0; i < output_drcb; ++i) { int bt = i / output_drc; int jkd = i % output_drc; int j = jkd / output_dr; int kd = jkd % output_dr; int k = kd / output_dpts; int d = kd % output_dpts; const int cstart = j * col_stride - pc; const int rstart = k * row_stride - pr; const int dstart = d * dpt_stride - pd; const int cend = cstart + kernel_cols_up; const int rend = rstart + kernel_rows_up; const int dend = dstart + kernel_dpts_up; const int input_idx_base = bt * input_crdi; int cnt = 0; for (int a = cstart; a < cend; a += col_in_stride) { for (int b = rstart; b < rend; b += row_in_stride) { for (int c = dstart; c < dend; c += dpt_in_stride) { for (int h = 0; h < in_channel; ++h) { if (a >= 0 && a < input_cols && b >= 0 && b < input_rows && c >= 0 && c < input_dpts) { int input_idx = input_idx_base + a * input_rdi + b * input_di + c * in_channel + h; inpt2d[i * kernel_idrc + cnt] = input_ptr[input_idx]; } ++cnt; } } } } } GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans, output_drcb, out_channel, kernel_idrc, ALPHA, inpt2d, kernel_idrc, kernel_ptr, out_channel, BETA, output_ptr, out_channel); free(inpt2d); return Val_unit; } CAMLprim value FUN_BYTE (dilated_cuboid_im2col) (value * argv, int argn) { return FUN_NATIVE (dilated_cuboid_im2col) ( argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15], argv[16], argv[17], argv[18], argv[19], argv[20], argv[21] ); } CAMLprim value FUN_NATIVE (dilated_cuboid_backward_kernel_im2col) ( value vInput, value vKernel, value vOutput, value vBatches, value vInput_cols, value vInput_rows, value vInput_dpts, value vIn_channel, value vKernel_cols, value vKernel_rows, value vKernel_dpts, value vOutput_cols, value vOutput_rows, value vOutput_dpts, value vOut_channel, value vDpt_stride, value vRow_stride, value vCol_stride, value vDpt_in_stride, value vRow_in_stride, value vCol_in_stride ) { struct caml_ba_array *IN = Caml_ba_array_val(vInput); struct caml_ba_array *KE = Caml_ba_array_val(vKernel); struct caml_ba_array *OU = Caml_ba_array_val(vOutput); TYPE *input_ptr = (TYPE *) IN->data; TYPE *kernel_ptr = (TYPE *) KE->data; TYPE *output_ptr = (TYPE *) OU->data; int batches = Long_val(vBatches); int input_cols = Long_val(vInput_cols); int input_rows = Long_val(vInput_rows); int input_dpts = Long_val(vInput_dpts); int in_channel = Long_val(vIn_channel); int kernel_cols = Long_val(vKernel_cols); int kernel_rows = Long_val(vKernel_rows); int kernel_dpts = Long_val(vKernel_dpts); int output_cols = Long_val(vOutput_cols); int output_rows = Long_val(vOutput_rows); int output_dpts = Long_val(vOutput_dpts); int out_channel = Long_val(vOut_channel); int dpt_stride = Long_val(vDpt_stride); int row_stride = Long_val(vRow_stride); int col_stride = Long_val(vCol_stride); int dpt_in_stride = Long_val(vDpt_in_stride); int row_in_stride = Long_val(vRow_in_stride); int col_in_stride = Long_val(vCol_in_stride); const int input_crdi = in_channel * input_dpts * input_rows * input_cols; const int input_rdi = in_channel * input_dpts * input_rows; const int input_di = in_channel * input_dpts; const int kernel_rdio = out_channel * in_channel * kernel_dpts * kernel_rows; const int output_dr = output_dpts * output_rows; const int output_drc = output_dpts * output_rows * output_cols; const int output_drcb = output_dpts * output_rows * output_cols * batches; const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols; INIT; TYPE *inpt2d = (TYPE *) calloc(kernel_idrc * output_drcb, 
sizeof(TYPE)); if (inpt2d == NULL) exit(1); TYPE *kern2d = (TYPE *) calloc(kernel_idrc * out_channel, sizeof(TYPE)); if (kern2d == NULL) exit(1); memset(kernel_ptr, 0, kernel_cols * kernel_rdio * sizeof(TYPE)); int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1); int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1); int kernel_dpts_up = kernel_dpts + (kernel_dpts - 1) * (dpt_in_stride - 1); int pc = (col_stride * (output_cols - 1) + kernel_cols_up - input_cols) / 2; int pr = (row_stride * (output_rows - 1) + kernel_rows_up - input_rows) / 2; int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts_up - input_dpts) / 2; if (pc < 0) pc = 0; if (pr < 0) pr = 0; if (pd < 0) pd = 0; #ifdef _OPENMP #pragma omp parallel for schedule(static) #endif /* _OPENMP */ for (int i = 0; i < output_drcb; ++i) { int bt = i / output_drc; int jkd = i % output_drc; int j = jkd / output_dr; int kd = jkd % output_dr; int k = kd / output_dpts; int d = kd % output_dpts; const int cstart = j * col_stride - pc; const int rstart = k * row_stride - pr; const int dstart = d * dpt_stride - pd; const int cend = cstart + kernel_cols_up; const int rend = rstart + kernel_rows_up; const int dend = dstart + kernel_dpts_up; const int input_idx_base = bt * input_crdi; int cnt = 0; for (int a = cstart; a < cend; a += col_in_stride) { for (int b = rstart; b < rend; b += row_in_stride) { for (int c = dstart; c < dend; c += dpt_in_stride) { for (int h = 0; h < in_channel; ++h) { if (a >= 0 && a < input_cols && b >= 0 && b < input_rows && c >= 0 && c < input_dpts) { int input_idx = input_idx_base + a * input_rdi + b * input_di + c * in_channel + h; inpt2d[i * kernel_idrc + cnt] = input_ptr[input_idx]; } ++cnt; } } } } } GEMM(CblasRowMajor, CblasTrans, CblasNoTrans, out_channel, kernel_idrc, output_drcb, ALPHA, output_ptr, out_channel, inpt2d, kernel_idrc, BETA, kern2d, kernel_idrc); int cnt = 0; for (int j = 0; j < kernel_idrc; ++j) { for (int i = 0; i < out_channel; ++i) { kernel_ptr[cnt++] = kern2d[i * kernel_idrc + j]; } } free(inpt2d); free(kern2d); return Val_unit; } CAMLprim value FUN_BYTE (dilated_cuboid_backward_kernel_im2col) (value * argv, int argn) { return FUN_NATIVE (dilated_cuboid_backward_kernel_im2col) ( argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15], argv[16], argv[17], argv[18], argv[19], argv[20] ); } CAMLprim value FUN_NATIVE (dilated_cuboid_backward_input_im2col) ( value vInput, value vKernel, value vOutput, value vBatches, value vInput_cols, value vInput_rows, value vInput_dpts, value vIn_channel, value vKernel_cols, value vKernel_rows, value vKernel_dpts, value vOutput_cols, value vOutput_rows, value vOutput_dpts, value vOut_channel, value vDpt_stride, value vRow_stride, value vCol_stride, value vDpt_in_stride, value vRow_in_stride, value vCol_in_stride ) { struct caml_ba_array *IN = Caml_ba_array_val(vInput); struct caml_ba_array *KE = Caml_ba_array_val(vKernel); struct caml_ba_array *OU = Caml_ba_array_val(vOutput); TYPE *input_ptr = (TYPE *) IN->data; TYPE *kernel_ptr = (TYPE *) KE->data; TYPE *output_ptr = (TYPE *) OU->data; int batches = Long_val(vBatches); int input_cols = Long_val(vInput_cols); int input_rows = Long_val(vInput_rows); int input_dpts = Long_val(vInput_dpts); int in_channel = Long_val(vIn_channel); int kernel_cols = Long_val(vKernel_cols); int kernel_rows = Long_val(vKernel_rows); int kernel_dpts = Long_val(vKernel_dpts); int output_cols = 
Long_val(vOutput_cols); int output_rows = Long_val(vOutput_rows); int output_dpts = Long_val(vOutput_dpts); int out_channel = Long_val(vOut_channel); int dpt_stride = Long_val(vDpt_stride); int row_stride = Long_val(vRow_stride); int col_stride = Long_val(vCol_stride); int dpt_in_stride = Long_val(vDpt_in_stride); int row_in_stride = Long_val(vRow_in_stride); int col_in_stride = Long_val(vCol_in_stride); const int input_crdi = in_channel * input_dpts * input_rows * input_cols; const int input_rdi = in_channel * input_dpts * input_rows; const int input_di = in_channel * input_dpts; const int output_dr = output_dpts * output_rows; const int output_drc = output_dpts * output_rows * output_cols; const int output_drcb = output_dpts * output_rows * output_cols * batches; const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols; TYPE *inpt2d = (TYPE *) calloc(kernel_idrc * output_drcb, sizeof(TYPE)); if (inpt2d == NULL) exit(1); memset(input_ptr, 0, batches * input_crdi * sizeof(TYPE)); INIT; int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1); int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1); int kernel_dpts_up = kernel_dpts + (kernel_dpts - 1) * (dpt_in_stride - 1); int pc = (col_stride * (output_cols - 1) + kernel_cols_up - input_cols) / 2; int pr = (row_stride * (output_rows - 1) + kernel_rows_up - input_rows) / 2; int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts_up - input_dpts) / 2; if (pc < 0) pc = 0; if (pr < 0) pr = 0; if (pd < 0) pd = 0; GEMM(CblasRowMajor, CblasNoTrans, CblasTrans, output_drcb, kernel_idrc, out_channel, ALPHA, output_ptr, out_channel, kernel_ptr, out_channel, BETA, inpt2d, kernel_idrc); for (int i = 0; i < output_drcb; ++i) { int bt = i / output_drc; int jkd = i % output_drc; int j = jkd / output_dr; int kd = jkd % output_dr; int k = kd / output_dpts; int d = kd % output_dpts; const int cstart = j * col_stride - pc; const int rstart = k * row_stride - pr; const int dstart = d * dpt_stride - pd; const int cend = cstart + kernel_cols_up; const int rend = rstart + kernel_rows_up; const int dend = dstart + kernel_dpts_up; const int input_idx_base = bt * input_crdi; int cnt = 0; for (int a = cstart; a < cend; a += col_in_stride) { for (int b = rstart; b < rend; b += row_in_stride) { for (int c = dstart; c < dend; c += dpt_in_stride) { for (int h = 0; h < in_channel; ++h) { if (a >= 0 && a < input_cols && b >= 0 && b < input_rows && c >= 0 && c < input_dpts) { int input_idx = input_idx_base + a * input_rdi + b * input_di + c * in_channel + h; input_ptr[input_idx] += inpt2d[i * kernel_idrc + cnt]; } ++cnt; } } } } } free(inpt2d); return Val_unit; } CAMLprim value FUN_BYTE (dilated_cuboid_backward_input_im2col) (value * argv, int argn) { return FUN_NATIVE (dilated_cuboid_backward_input_im2col) ( argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15], argv[16], argv[17], argv[18], argv[19], argv[20] ); } #endif /* OWL_ENABLE_TEMPLATE */
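/* The *_im2col stubs above lower convolution to matrix multiplication: im2col
 * gathers every input patch into one row of a scratch matrix, after which a
 * single GEMM with the flattened kernel computes all output positions at once.
 * Below is a minimal self-contained sketch of that idea (toy sizes, one
 * channel, plain loops standing in for the BLAS GEMM call; none of these
 * names come from the Owl API). */
#include <cstdio>
#include <vector>

int main() {
    const int in_h = 3, in_w = 3, k_h = 2, k_w = 2;           // stride 1, valid padding
    const int out_h = in_h - k_h + 1, out_w = in_w - k_w + 1;
    const double input[3][3] = {{1, 2, 3}, {4, 5, 6}, {7, 8, 9}};
    const double kernel[4] = {1, 0, 0, -1};                    // one output channel

    // im2col: one row per output position, one column per kernel element.
    std::vector<double> cols(out_h * out_w * k_h * k_w);
    for (int r = 0; r < out_h; ++r)
        for (int c = 0; c < out_w; ++c)
            for (int a = 0; a < k_h; ++a)
                for (int b = 0; b < k_w; ++b)
                    cols[((r * out_w + c) * k_h + a) * k_w + b] = input[r + a][c + b];

    // The whole convolution is now a (out_h*out_w) x (k_h*k_w) GEMM with the
    // (k_h*k_w) x out_channel kernel matrix; here out_channel = 1.
    for (int i = 0; i < out_h * out_w; ++i) {
        double sum = 0.0;
        for (int j = 0; j < k_h * k_w; ++j)
            sum += cols[i * k_h * k_w + j] * kernel[j];
        std::printf("out[%d] = %g\n", i, sum);                 // equals in[r][c] - in[r+1][c+1]
    }
    return 0;
}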
convolution_1x1_bf16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

static void conv1x1s1_sgemm_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    const int size = w * h;

    Mat bottom_im2col = bottom_blob;
    bottom_im2col.w = size;
    bottom_im2col.h = 1;

    im2col_sgemm_bf16s_neon(bottom_im2col, top_blob, kernel, _bias, opt);
}

static void conv1x1s2_sgemm_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int tailstep = w - 2 * outw + w;

    Mat bottom_blob_shrinked;
    bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < channels; p++)
    {
        const unsigned short* r0 = bottom_blob.channel(p);
        unsigned short* outptr = bottom_blob_shrinked.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                outptr[0] = r0[0];

                r0 += 2;
                outptr += 1;
            }

            r0 += tailstep;
        }
    }

    conv1x1s1_sgemm_bf16s_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
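// The stride-2 path above never multiplies anything itself: a 1x1 kernel with
// stride 2 only ever reads every other pixel of every other row, so the input
// is first "shrunk" to exactly those pixels and the stride-1 sgemm path is
// reused.  tailstep = (w - 2*outw) + w skips the unread tail of the current
// row plus the entire row that the stride jumps over.  A standalone sketch
// with plain floats and a single channel (illustrative values only):
#include <cstdio>

int main() {
    const int w = 5, h = 4;
    const int outw = (w - 1) / 2 + 1;      // 3
    const int outh = (h - 1) / 2 + 1;      // 2
    const int tailstep = w - 2 * outw + w; // 4
    float in[20], out[6];
    for (int i = 0; i < w * h; ++i) in[i] = (float)i;

    const float* r0 = in;
    float* outptr = out;
    for (int i = 0; i < outh; i++) {
        for (int j = 0; j < outw; j++) {
            *outptr++ = r0[0];
            r0 += 2;
        }
        r0 += tailstep;                    // lands on the start of row 2*(i+1)
    }
    for (int i = 0; i < outw * outh; ++i) std::printf("%g ", out[i]); // 0 2 4 10 12 14
    std::printf("\n");
    return 0;
}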
ef_error.c
/* { dg-do compile } */
/* { dg-options "-fcilkplus -fopenmp-simd" } */

#pragma omp declare simd linear(y:1) simdlen(4)
__attribute__((vector (linear (y:1), vectorlength(4))))
int func (int x, int y)
{ /* { dg-error "cannot be used in the same function marked as a Cilk Plus SIMD-enabled" } */
  return (x+y);
}

__attribute__((vector (linear (y:1), private (x)))) /* { dg-error "is not valid for" } */
int func2 (int x, int y)
{
  return (x+y);
}

__attribute__((vector (linear (y:1), simdlen (4)))) /* { dg-error "is not valid for" } */
int func2_1 (int x, int y)
{
  return (x+y);
}

__attribute__((vector (linear (y:1), inbranch))) /* { dg-error "is not valid for" } */
int func2_3 (int x, int y)
{
  return (x+y);
}

__attribute__((vector (notinbranch, vectorlength (4)))) /* { dg-error "is not valid for" } */
int func2_2 (int x, int y)
{
  return (x+y);
}

int main (void)
{
  return (func (5,6));
}
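/* For contrast with the error cases above: each notation is accepted on its
   own.  What is diagnosed is combining OpenMP "declare simd" with the Cilk
   Plus "vector" attribute on one function, or using OpenMP-only clause names
   (private, simdlen, inbranch, notinbranch) inside the vector attribute.
   A sketch of the two valid spellings -- not part of the testcase above, and
   untested against any particular GCC version: */

#pragma omp declare simd linear(y:1) simdlen(4)
int func_omp_only (int x, int y)
{
  return (x+y);
}

__attribute__((vector (linear (y:1), vectorlength(4))))
int func_cilk_only (int x, int y)
{
  return (x+y);
}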
main.c
/* * Graph500 BFS * Uses the generator and test harness provided in the Graph 500 reference implementation v1.2 * * Kamesh Madduri, Penn State University * last updated: December 2011 */ /* Copyright (C) 2010 The Trustees of Indiana University. */ /* */ /* Use, modification and distribution is subject to the Boost Software */ /* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */ /* http://www.boost.org/LICENSE_1_0.txt) */ /* */ /* Authors: Jeremiah Willcock */ /* Andrew Lumsdaine */ #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <assert.h> #include <math.h> #include <sys/stat.h> #if USE_MPI #include <mpi.h> #endif #ifdef __cplusplus extern "C" { #endif #include "generator/make_graph.h" #include "graph.h" #ifdef __cplusplus } #endif #ifdef _OPENMP #include <omp.h> #endif int rank, nprocs; enum {s_minimum, s_firstquartile, s_median, s_thirdquartile, s_maximum, s_mean, s_std, s_LAST}; static int compare_doubles(const void* a, const void* b) { double aa = *(const double*)a; double bb = *(const double*)b; return (aa < bb) ? -1 : (aa == bb) ? 0 : 1; } void get_statistics(const double x[], int n, double r[s_LAST]) { double temp; int i; /* Compute mean. */ temp = 0; for (i = 0; i < n; ++i) temp += x[i]; temp /= n; r[s_mean] = temp; /* Compute std. dev. */ temp = 0; for (i = 0; i < n; ++i) temp += (x[i] - r[s_mean]) * (x[i] - r[s_mean]); temp /= n - 1; r[s_std] = sqrt(temp); /* Sort x. */ double* xx = (double*)malloc(n * sizeof(double)); memcpy(xx, x, n * sizeof(double)); qsort(xx, n, sizeof(double), compare_doubles); /* Get order statistics. */ r[s_minimum] = xx[0]; r[s_firstquartile] = (xx[(n - 1) / 4] + xx[n / 4]) * .5; r[s_median] = (xx[(n - 1) / 2] + xx[n / 2]) * .5; r[s_thirdquartile] = (xx[n - 1 - (n - 1) / 4] + xx[n - 1 - n / 4]) * .5; r[s_maximum] = xx[n - 1]; /* Clean up. */ free(xx); } int main(int argc, char** argv) { int SCALE; double edgefactor; //uint64_t nedges; //uint32_t *edges; char *input_filename; int num_procrows, num_proccols, num_replicas; graph_gen_data_t ggi; //graph_gen_aux_data_t ggaux; int create_2D_graph = 0; int read_graph_from_file = 0; #if USE_MPI MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &nprocs); #else rank = 0; nprocs = 1; #endif /* Parse arguments. */ SCALE = 16; edgefactor = 16.0; if (argc >= 2) SCALE = atoi(argv[1]); if (argc >= 3) edgefactor = atof(argv[2]); create_2D_graph = 0; if (argc <= 7) { num_replicas = atoi(argv[3]); num_procrows = atoi(argv[4]); num_proccols = atoi(argv[5]); if ((num_replicas * num_proccols * num_procrows) != nprocs) { if (rank > 0) { fprintf(stderr, "Invalid input!\n"); #if USE_MPI MPI_Abort(MPI_COMM_WORLD, 1); #else exit(1); #endif } } create_2D_graph = 1; } /* Read graph from file */ if (argc == 7) { read_graph_from_file = 1; input_filename = argv[6]; } if (argc <= 1 || argc >= 8) { if (rank == 0) { fprintf(stderr, "Usage: %s SCALE edgefactor\n SCALE = log_2(# vertices) [integer, required]\n edgefactor = (# edges) / (# vertices) = .5 * (average vertex degree) [float, defaults to 16]\n(Random number seed and Kronecker initiator are in main.c)\n", argv[0]); } #if USE_MPI MPI_Barrier(MPI_COMM_WORLD); MPI_Abort(MPI_COMM_WORLD, 1); #else exit(1); #endif } #if USE_MPI MPI_Barrier(MPI_COMM_WORLD); #endif /* Make the raw graph edges. 
*/ #pragma omp parallel { #ifdef _OPENMP int nthreads = omp_get_num_threads(); #else int nthreads = 1; #endif #pragma omp single if (rank == 0) { fprintf(stderr, "SCALE: %d, %d MPI tasks, %d OMP threads\n" "nreplicas %d, nproc_rows %d, nproc_cols %d\n", SCALE, nprocs, nthreads, num_replicas, num_procrows, num_proccols); #if REPLICATE_D fprintf(stderr, "Vertex status array replicated\n"); #endif } } int64_t nedges; packed_edge* gen_edges; double make_graph_time; if (read_graph_from_file == 0) { double make_graph_start = get_seconds(); ggi.SCALE = SCALE; ggi.n = 1UL<<SCALE; ggi.m = edgefactor*ggi.n; // gen_graph_edges(&ggi, &ggaux); make_graph(SCALE, edgefactor*(1L<<SCALE), 1, 2, &nedges, &gen_edges); double make_graph_stop = get_seconds(); make_graph_time = make_graph_stop - make_graph_start; } else { double make_graph_start = get_seconds(); //ggi.SCALE = SCALE; //ggi.n = 1UL<<SCALE; //ggi.m = edgefactor*ggi.n; // read_graph(input_filename, &nedges, &gen_edges); FILE *infp = fopen(input_filename, "rb"); assert(infp != NULL); struct stat st; stat(input_filename, &st); long nedges_global = st.st_size/8; ggi.m = nedges_global; uint64_t read_offset_start = rank * 8* (nedges_global/nprocs); uint64_t read_offset_end = (rank+1) * 8 * (nedges_global/nprocs); if (rank == nprocs - 1) read_offset_end = 8*nedges_global; nedges = (read_offset_end - read_offset_start)/8; /* gen_edges is an array of unsigned ints of size 2*m_local */ fseek(infp, read_offset_start, SEEK_SET); uint32_t *gen_edges_fp = (uint32_t *) malloc(2*nedges*sizeof(uint32_t)); assert(gen_edges_fp != NULL); if (rank == 0) { fprintf(stderr, "nedges is %ld\n", nedges); } fread(gen_edges_fp, 2*nedges, sizeof(uint32_t), infp); fclose(infp); gen_edges = (packed_edge *) malloc(nedges*sizeof(packed_edge)); assert(gen_edges != NULL); uint32_t *gedges = (uint32_t *) gen_edges; for (long i=0; i<nedges; i++) { gedges[3*i] = gen_edges_fp[2*i]; gedges[3*i+1] = gen_edges_fp[2*i+1]; gedges[3*i+2] = 0; } free(gen_edges_fp); // gen_graph_edges(&ggi, &ggaux); // make_graph(SCALE, edgefactor*(1L<<SCALE), 1, 2, &nedges, &gen_edges); double make_graph_stop = get_seconds(); make_graph_time = make_graph_stop - make_graph_start; } #if USE_MPI MPI_Barrier(MPI_COMM_WORLD); #endif /* the distributed graph data structure */ dist_graph_t g; //nedges = ggi.m_local; //edges = ggi.gen_edges; /* Time graph creation */ double data_struct_start = get_seconds(); uint32_t *edges = (uint32_t *) gen_edges; if (create_2D_graph) { g.nproc_rows = num_procrows; g.nproc_cols = num_proccols; g.nreplicas = num_replicas; create_2Ddist_graph(nedges, edges, &g); } else { create_dist_graph(nedges, edges, &g); } double data_struct_stop = get_seconds(); double data_struct_time = data_struct_stop - data_struct_start; /* Get roots for BFS runs. */ int num_bfs_roots = 32; uint64_t* bfs_roots = (uint64_t *) malloc(num_bfs_roots * sizeof(uint64_t)); find_bfs_start_vertices(num_bfs_roots, &g, bfs_roots); /* Number of edges visited in each BFS; a double so get_statistics can be * used directly. */ double* edge_counts = (double *) malloc(num_bfs_roots * sizeof(double)); /* Run BFS. 
*/ int validation_passed = 1; double* bfs_times = (double *) malloc(num_bfs_roots * sizeof(double)); double* validate_times = (double *) malloc(num_bfs_roots * sizeof(double)); uint64_t* pred = (uint64_t *) _mm_malloc((3 * g.n_local) * sizeof(uint64_t), 16); assert(pred != NULL); uint8_t* d_trans = (uint8_t *) malloc(g.n_local * sizeof(uint8_t)); assert(d_trans != NULL); uint8_t* d_trans_full = (uint8_t *) malloc(g.n_local_row * sizeof(uint8_t)); assert(d_trans_full != NULL); int bfs_root_idx; #if 0 for (bfs_root_idx = 0; bfs_root_idx < num_bfs_roots; ++bfs_root_idx) { uint64_t root = bfs_roots[bfs_root_idx]; if (rank == 0) fprintf(stderr, "Running BFS %d (%lu)\n", bfs_root_idx, root); /* Clear the pred array. */ memset(pred, 0, 3 * g.n_local * sizeof(uint64_t)); assert(pred != NULL); /* Do the actual BFS. */ double bfs_start = get_seconds(); uint64_t nvisited = 0; uint64_t pred_array_size = 0; if (create_2D_graph) run_bfs_2Dgraph(&g, root, pred, &pred_array_size, &nvisited); double bfs_stop = get_seconds(); bfs_times[bfs_root_idx] = bfs_stop - bfs_start; /* Calculate number of input edges visited. */ uint64_t edge_visit_count = 0; #if REPLICATE_D for (uint64_t i = 0; i < g.n_local_row; i++) { for (uint64_t j=g.num_edges[i]; j<g.num_edges[i+1]; j++) { uint64_t v = g.adj[j]; uint32_t dv = g.d[v/2]; if ((v & 1U) == 0) { dv = (dv & 0xF0)>>4; } else { dv = (dv & 0x0F); } if (dv != 0) edge_visit_count++; } } int irow = (g.comm_data).irow; int jcol = (g.comm_data).jcol; int nproc_rows = g.nproc_rows; int nproc_cols = g.nproc_cols; int recv_proc = ((irow*nproc_cols+jcol)/nproc_rows) + nproc_cols * ((irow*nproc_cols+jcol)%nproc_rows); MPI_Status status1; uint8_t *d_send_offset = g.d + (irow*g.n_local)/2; if (rank == 0) { if (g.n_local % 2 != 0) fprintf(stderr, "Warning! 
Visited edge count will be incorrect\n"); } MPI_Sendrecv(d_send_offset, g.n_local, MPI_UNSIGNED_CHAR, jcol*nproc_rows+irow, rank % (nproc_cols * nproc_rows), d_trans, g.n_local, MPI_UNSIGNED_CHAR, recv_proc, recv_proc, (g.comm_data).replicas_comm, &status1); MPI_Allgather(d_trans, g.n_local, MPI_UNSIGNED_CHAR, d_trans_full, g.n_local, MPI_UNSIGNED_CHAR, (g.comm_data).row_comm); g.d_trans = d_trans; g.d_trans_full = d_trans_full; #else int irow = (g.comm_data).irow; int jcol = (g.comm_data).jcol; int nproc_rows = g.nproc_rows; int nproc_cols = g.nproc_cols; int recv_proc = ((irow*nproc_cols+jcol)/nproc_rows) + nproc_cols * ((irow*nproc_cols+jcol)%nproc_rows); MPI_Status status1; MPI_Sendrecv(g.d, g.n_local, MPI_UNSIGNED_CHAR, jcol*nproc_rows+irow, rank % (nproc_cols * nproc_rows), d_trans, g.n_local, MPI_UNSIGNED_CHAR, recv_proc, recv_proc, (g.comm_data).replicas_comm, &status1); MPI_Allgather(d_trans, g.n_local, MPI_UNSIGNED_CHAR, d_trans_full, g.n_local, MPI_UNSIGNED_CHAR, (g.comm_data).row_comm); g.d_trans = d_trans; g.d_trans_full = d_trans_full; for (uint64_t i = 0; i < g.n_local_row; i++) { uint64_t u_off = i % g.n_local; uint32_t du = d_trans_full[(i/g.n_local)*g.n_local + u_off/2]; if ((u_off & 1U) == 0) { du = (du & 0xF0)>>4; } else { du = (du & 0x0F); } if (du != 0) { edge_visit_count += (g.num_edges[i+1] - g.num_edges[i]); } } #endif #if USE_MPI MPI_Allreduce(MPI_IN_PLACE, &edge_visit_count, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); #endif edge_counts[bfs_root_idx] = (double) edge_visit_count/2; if (rank==0) fprintf(stderr, "edge visit count: %ld\n", edge_visit_count/2); double validate_start = get_seconds(); int validation_passed_one = validate_bfs_result(&g, root, pred, pred_array_size); double validate_stop = get_seconds(); validate_times[bfs_root_idx] = validate_stop - validate_start; if (!validation_passed_one) { validation_passed = 0; if (rank == 0) fprintf(stderr, "Validation failed for this BFS root; skipping rest.\n"); break; } MPI_Barrier(MPI_COMM_WORLD); } #if USE_MPI MPI_Barrier(MPI_COMM_WORLD); #endif /* Print results. 
*/ if (rank == 0) { fflush(stderr); fprintf(stderr, "\n\n"); if (!validation_passed) { fprintf(stderr, "No results printed for invalid run.\n"); } else { int i; fprintf(stderr, "SCALE: %d\n", SCALE); fprintf(stderr, "edgefactor: %.2g\n", edgefactor); fprintf(stderr, "NBFS: %d\n", num_bfs_roots); fprintf(stderr, "num_mpi_processes: %d\n", nprocs); fprintf(stderr, "graph_generation: %g s\n", make_graph_time); fprintf(stderr, "construction_time: %g s\n", data_struct_time); double stats[s_LAST]; get_statistics(bfs_times, num_bfs_roots, stats); fprintf(stderr, "min_time: %g s\n", stats[s_minimum]); fprintf(stderr, "firstquartile_time: %g s\n", stats[s_firstquartile]); fprintf(stderr, "median_time: %g s\n", stats[s_median]); fprintf(stderr, "thirdquartile_time: %g s\n", stats[s_thirdquartile]); fprintf(stderr, "max_time: %g s\n", stats[s_maximum]); fprintf(stderr, "mean_time: %g s\n", stats[s_mean]); fprintf(stderr, "stddev_time: %g\n", stats[s_std]); get_statistics(edge_counts, num_bfs_roots, stats); fprintf(stderr, "min_nedge: %.11g\n", stats[s_minimum]); fprintf(stderr, "firstquartile_nedge: %.11g\n", stats[s_firstquartile]); fprintf(stderr, "median_nedge: %.11g\n", stats[s_median]); fprintf(stderr, "thirdquartile_nedge: %.11g\n", stats[s_thirdquartile]); fprintf(stderr, "max_nedge: %.11g\n", stats[s_maximum]); fprintf(stderr, "mean_nedge: %.11g\n", stats[s_mean]); fprintf(stderr, "stddev_nedge: %.11g\n", stats[s_std]); double* secs_per_edge = (double*)malloc(num_bfs_roots * sizeof(double)); for (i = 0; i < num_bfs_roots; ++i) secs_per_edge[i] = bfs_times[i] / edge_counts[i]; get_statistics(secs_per_edge, num_bfs_roots, stats); fprintf(stderr, "min_TEPS: %g TEPS\n", 1. / stats[s_maximum]); fprintf(stderr, "firstquartile_TEPS: %g TEPS\n", 1. / stats[s_thirdquartile]); fprintf(stderr, "median_TEPS: %g TEPS\n", 1. / stats[s_median]); fprintf(stderr, "thirdquartile_TEPS: %g TEPS\n", 1. / stats[s_firstquartile]); fprintf(stderr, "max_TEPS: %g TEPS\n", 1. / stats[s_minimum]); fprintf(stderr, "harmonic_mean_TEPS: %g TEPS\n", 1. / stats[s_mean]); /* Formula from: * Title: The Standard Errors of the Geometric and Harmonic Means and * Their Application to Index Numbers * Author(s): Nilan Norris * Source: The Annals of Mathematical Statistics, Vol. 11, No. 4 (Dec., 1940), pp. 445-448 * Publisher(s): Institute of Mathematical Statistics * Stable URL: http://www.jstor.org/stable/2235723 * (same source as in specification). */ fprintf(stderr, "harmonic_stddev_TEPS: %g\n", stats[s_std] / (stats[s_mean] * stats[s_mean] * sqrt(num_bfs_roots - 1))); free(secs_per_edge); secs_per_edge = NULL; get_statistics(validate_times, num_bfs_roots, stats); fprintf(stderr, "min_validate: %g s\n", stats[s_minimum]); fprintf(stderr, "firstquartile_validate: %g s\n", stats[s_firstquartile]); fprintf(stderr, "median_validate: %g s\n", stats[s_median]); fprintf(stderr, "thirdquartile_validate: %g s\n", stats[s_thirdquartile]); fprintf(stderr, "max_validate: %g s\n", stats[s_maximum]); fprintf(stderr, "mean_validate: %g s\n", stats[s_mean]); fprintf(stderr, "stddev_validate: %g\n", stats[s_std]); #if 0 for (i = 0; i < num_bfs_roots; ++i) { fprintf(stderr, "Run %3d: %g s, validation %g s\n", i + 1, bfs_times[i], validate_times[i]); } #endif } fflush(stderr); } #endif #if 1 for (bfs_root_idx = 0; bfs_root_idx < num_bfs_roots; ++bfs_root_idx) { uint64_t root = bfs_roots[bfs_root_idx]; if (rank == 0) fprintf(stderr, "Running threaded BFS %d (%lu)\n", bfs_root_idx, root); /* Clear the pred array. 
*/ memset(pred, 0, 3 * g.n_local * sizeof(uint64_t)); assert(pred != NULL); /* Do the actual BFS. */ double bfs_start = get_seconds(); uint64_t nvisited = 0; uint64_t pred_array_size = 0; if (create_2D_graph) run_bfs_2Dgraph_threaded(&g, root, pred, &pred_array_size, &nvisited); double bfs_stop = get_seconds(); bfs_times[bfs_root_idx] = bfs_stop - bfs_start; /* Calculate number of input edges visited. */ uint64_t edge_visit_count = 0; #if REPLICATE_D for (uint64_t i = 0; i < g.n_local_row; i++) { for (uint64_t j=g.num_edges[i]; j<g.num_edges[i+1]; j++) { uint64_t v = g.adj[j]; uint32_t dv = g.d[v]; if (dv != 0) edge_visit_count++; } } int irow = (g.comm_data).irow; int jcol = (g.comm_data).jcol; int nproc_rows = g.nproc_rows; int nproc_cols = g.nproc_cols; int recv_proc = ((irow*nproc_cols+jcol)/nproc_rows) + nproc_cols * ((irow*nproc_cols+jcol)%nproc_rows); MPI_Status status1; uint8_t *d_send_offset = g.d + (irow*g.n_local); MPI_Sendrecv(d_send_offset, g.n_local, MPI_UNSIGNED_CHAR, jcol*nproc_rows+irow, rank % (nproc_cols * nproc_rows), d_trans, g.n_local, MPI_UNSIGNED_CHAR, recv_proc, recv_proc, (g.comm_data).replicas_comm, &status1); MPI_Allgather(d_trans, g.n_local, MPI_UNSIGNED_CHAR, d_trans_full, g.n_local, MPI_UNSIGNED_CHAR, (g.comm_data).row_comm); g.d_trans = d_trans; g.d_trans_full = d_trans_full; #else int irow = (g.comm_data).irow; int jcol = (g.comm_data).jcol; int nproc_rows = g.nproc_rows; int nproc_cols = g.nproc_cols; int recv_proc = ((irow*nproc_cols+jcol)/nproc_rows) + nproc_cols * ((irow*nproc_cols+jcol)%nproc_rows); MPI_Status status1; MPI_Sendrecv(g.d, g.n_local, MPI_UNSIGNED_CHAR, jcol*nproc_rows+irow, rank % (nproc_cols * nproc_rows), d_trans, g.n_local, MPI_UNSIGNED_CHAR, recv_proc, recv_proc, (g.comm_data).replicas_comm, &status1); MPI_Allgather(d_trans, g.n_local, MPI_UNSIGNED_CHAR, d_trans_full, g.n_local, MPI_UNSIGNED_CHAR, (g.comm_data).row_comm); g.d_trans = d_trans; g.d_trans_full = d_trans_full; for (uint64_t i = 0; i < g.n_local_row; i++) { uint64_t u_off = i % g.n_local; uint32_t du = d_trans_full[(i/g.n_local)*g.n_local + u_off]; if (du != 0) { edge_visit_count += (g.num_edges[i+1] - g.num_edges[i]); } } #endif #if USE_MPI MPI_Allreduce(MPI_IN_PLACE, &edge_visit_count, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); #endif edge_counts[bfs_root_idx] = (double) edge_visit_count/2; double validate_start = get_seconds(); int validation_passed_one = validate_bfs_result_threaded(&g, root, pred, pred_array_size); double validate_stop = get_seconds(); validate_times[bfs_root_idx] = validate_stop - validate_start; if (!validation_passed_one) { validation_passed = 0; if (rank == 0) fprintf(stderr, "Validation failed for this BFS root; skipping rest.\n"); break; } MPI_Barrier(MPI_COMM_WORLD); } /* Print results. 
*/ if (rank == 0) { fflush(stderr); fprintf(stderr, "\n\n"); if (!validation_passed) { fprintf(stderr, "No results printed for invalid run.\n"); } else { int i; fprintf(stderr, "SCALE: %d\n", SCALE); fprintf(stderr, "edgefactor: %.2g\n", edgefactor); fprintf(stderr, "NBFS: %d\n", num_bfs_roots); fprintf(stderr, "num_mpi_processes: %d\n", nprocs); fprintf(stderr, "graph_generation: %g s\n", make_graph_time); fprintf(stderr, "construction_time: %g s\n", data_struct_time); double stats[s_LAST]; get_statistics(bfs_times, num_bfs_roots, stats); fprintf(stderr, "min_time: %g s\n", stats[s_minimum]); fprintf(stderr, "firstquartile_time: %g s\n", stats[s_firstquartile]); fprintf(stderr, "median_time: %g s\n", stats[s_median]); fprintf(stderr, "thirdquartile_time: %g s\n", stats[s_thirdquartile]); fprintf(stderr, "max_time: %g s\n", stats[s_maximum]); fprintf(stderr, "mean_time: %g s\n", stats[s_mean]); fprintf(stderr, "stddev_time: %g\n", stats[s_std]); get_statistics(edge_counts, num_bfs_roots, stats); fprintf(stderr, "min_nedge: %.11g\n", stats[s_minimum]); fprintf(stderr, "firstquartile_nedge: %.11g\n", stats[s_firstquartile]); fprintf(stderr, "median_nedge: %.11g\n", stats[s_median]); fprintf(stderr, "thirdquartile_nedge: %.11g\n", stats[s_thirdquartile]); fprintf(stderr, "max_nedge: %.11g\n", stats[s_maximum]); fprintf(stderr, "mean_nedge: %.11g\n", stats[s_mean]); fprintf(stderr, "stddev_nedge: %.11g\n", stats[s_std]); double* secs_per_edge = (double*)malloc(num_bfs_roots * sizeof(double)); for (i = 0; i < num_bfs_roots; ++i) secs_per_edge[i] = bfs_times[i] / edge_counts[i]; get_statistics(secs_per_edge, num_bfs_roots, stats); fprintf(stderr, "min_TEPS: %g TEPS\n", 1. / stats[s_maximum]); fprintf(stderr, "firstquartile_TEPS: %g TEPS\n", 1. / stats[s_thirdquartile]); fprintf(stderr, "median_TEPS: %g TEPS\n", 1. / stats[s_median]); fprintf(stderr, "thirdquartile_TEPS: %g TEPS\n", 1. / stats[s_firstquartile]); fprintf(stderr, "max_TEPS: %g TEPS\n", 1. / stats[s_minimum]); fprintf(stderr, "harmonic_mean_TEPS: %g TEPS\n", 1. / stats[s_mean]); /* Formula from: * Title: The Standard Errors of the Geometric and Harmonic Means and * Their Application to Index Numbers * Author(s): Nilan Norris * Source: The Annals of Mathematical Statistics, Vol. 11, No. 4 (Dec., 1940), pp. 445-448 * Publisher(s): Institute of Mathematical Statistics * Stable URL: http://www.jstor.org/stable/2235723 * (same source as in specification). */ fprintf(stderr, "harmonic_stddev_TEPS: %g\n", stats[s_std] / (stats[s_mean] * stats[s_mean] * sqrt(num_bfs_roots - 1))); free(secs_per_edge); secs_per_edge = NULL; get_statistics(validate_times, num_bfs_roots, stats); fprintf(stderr, "min_validate: %g s\n", stats[s_minimum]); fprintf(stderr, "firstquartile_validate: %g s\n", stats[s_firstquartile]); fprintf(stderr, "median_validate: %g s\n", stats[s_median]); fprintf(stderr, "thirdquartile_validate: %g s\n", stats[s_thirdquartile]); fprintf(stderr, "max_validate: %g s\n", stats[s_maximum]); fprintf(stderr, "mean_validate: %g s\n", stats[s_mean]); fprintf(stderr, "stddev_validate: %g\n", stats[s_std]); #if 0 for (i = 0; i < num_bfs_roots; ++i) { fprintf(stderr, "Run %3d: %g s, validation %g s\n", i + 1, bfs_times[i], validate_times[i]); } #endif } fflush(stderr); } #endif free(edge_counts); edge_counts = NULL; _mm_free(pred); free(bfs_roots); free_graph(&g); free(d_trans); free(d_trans_full); free(bfs_times); free(validate_times); #if USE_MPI MPI_Barrier(MPI_COMM_WORLD); MPI_Finalize(); #endif return 0; }
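// Worked check (made-up numbers) of the TEPS statistics printed above: the
// code forms secs_per_edge[i] = bfs_times[i] / edge_counts[i], and the
// reported harmonic_mean_TEPS is 1 / mean(secs_per_edge), matching the
// "1. / stats[s_mean]" line, with the stddev taken from Norris (1940) as
// s / (m*m*sqrt(n-1)) where m and s are the mean and standard deviation of
// secs_per_edge.
#include <cmath>
#include <cstdio>

int main() {
    const int n = 3;
    const double times[3] = {0.50, 0.40, 0.80};   // seconds per BFS run
    const double edges[3] = {1e6, 1e6, 1e6};      // edges visited per run

    double spe[3], mean = 0.0;
    for (int i = 0; i < n; ++i) { spe[i] = times[i] / edges[i]; mean += spe[i]; }
    mean /= n;

    double var = 0.0;
    for (int i = 0; i < n; ++i) var += (spe[i] - mean) * (spe[i] - mean);
    const double sd = std::sqrt(var / (n - 1));

    std::printf("harmonic_mean_TEPS: %g TEPS\n", 1.0 / mean);   // ~1.76471e+06
    std::printf("harmonic_stddev_TEPS: %g\n",
                sd / (mean * mean * std::sqrt((double)(n - 1))));
    return 0;
}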
estimator.h
// Copyright (C) 2013 The Regents of the University of California (Regents).
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//
//     * Neither the name of The Regents or University of California nor the
//       names of its contributors may be used to endorse or promote products
//       derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Please contact the author of this library if you have any questions.
// Author: Chris Sweeney (cmsweeney@cs.ucsb.edu)

#ifndef THEIA_SOLVERS_ESTIMATOR_H_
#define THEIA_SOLVERS_ESTIMATOR_H_

#include <glog/logging.h>
#ifdef THEIA_USE_OPENMP
#include <omp.h>
#endif
#include <vector>

namespace theia {

// Templated class for estimating a model for RANSAC. This class is purely a
// virtual class and should be implemented for the specific task that RANSAC is
// being used for. Three methods must be implemented: SampleSize, EstimateModel
// and Error. All other methods are optional, but will likely enhance the
// quality of the RANSAC output.
//
// NOTE: RANSAC, ARRSAC, and other solvers work best if Datum and Model are
// lightweight classes or structs.
template <typename DatumType, typename ModelType>
class Estimator {
 public:
  typedef DatumType Datum;
  typedef ModelType Model;

  Estimator() {}
  virtual ~Estimator() {}

  // Get the minimum number of samples needed to generate a model.
  virtual double SampleSize() const = 0;

  // Given a set of data points, estimate the model. Users should implement
  // this function appropriately for the task being solved. Returns true for
  // successful model estimation (and outputs model), false for failed
  // estimation. Typically, this is a minimal set, but it is not required to be.
  virtual bool EstimateModel(const std::vector<Datum>& data,
                             std::vector<Model>* model) const = 0;

  // Estimate a model from a non-minimal sampling of the data. E.g. for a line,
  // use SVD on a set of points instead of constructing a line from two points.
  // By default, this simply implements the minimal case.
  virtual bool EstimateModelNonminimal(const std::vector<Datum>& data,
                                       std::vector<Model>* model) const {
    return EstimateModel(data, model);
  }

  // Refine the model based on an updated subset of data, and a pre-computed
  // model. Can be optionally implemented.
  virtual bool RefineModel(const std::vector<Datum>& data,
                           Model* model) const {
    return true;
  }

  // Given a model and a data point, calculate the error. Users should
  // implement this function appropriately for the task being solved.
  virtual double Error(const Datum& data, const Model& model) const = 0;

  // Compute the residuals of many data points. By default this is just a loop
  // that calls Error() on each data point, but this function can be useful if
  // the errors of multiple points may be estimated simultaneously (e.g.,
  // matrix multiplication to compute the reprojection error of many points at
  // once).
  virtual std::vector<double> Residuals(const std::vector<Datum>& data,
                                        const Model& model) const {
    std::vector<double> residuals(data.size());
#pragma omp parallel for
    for (int i = 0; i < static_cast<int>(data.size()); i++) {
      residuals[i] = Error(data[i], model);
    }
    return residuals;
  }

  // Returns the set of inliers of the data set based on the error threshold
  // provided.
  std::vector<int> GetInliers(const std::vector<Datum>& data,
                              const Model& model,
                              double error_threshold) const {
    std::vector<int> inliers;
    inliers.reserve(data.size());
    for (int i = 0; i < static_cast<int>(data.size()); i++) {
      if (Error(data[i], model) < error_threshold) {
        inliers.push_back(i);
      }
    }
    return inliers;
  }

  // Enable a quick check to see if the model is valid. This can be a geometric
  // check or some other verification of the model structure.
  virtual bool ValidModel(const Model& model) const { return true; }
};

}  // namespace theia

#endif  // THEIA_SOLVERS_ESTIMATOR_H_
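What a concrete subclass looks like is easiest to see with a toy model. The sketch below is illustrative and not part of Theia: Point2d, Line2d and LineEstimator are made-up names, and the fit is the minimal two-point line construction. Only the three pure-virtual methods are filled in; Residuals, GetInliers and ValidModel come from the base class.

#include <cmath>
#include <vector>

struct Point2d { double x, y; };
struct Line2d { double m, b; };  // y = m*x + b

class LineEstimator : public theia::Estimator<Point2d, Line2d> {
 public:
  // Two points determine a line.
  double SampleSize() const override { return 2; }

  bool EstimateModel(const std::vector<Point2d>& data,
                     std::vector<Line2d>* models) const override {
    // Reject degenerate samples (too few points, or a vertical line).
    if (data.size() < 2 || data[1].x == data[0].x) return false;
    Line2d line;
    line.m = (data[1].y - data[0].y) / (data[1].x - data[0].x);
    line.b = data[0].y - line.m * data[0].x;
    models->push_back(line);
    return true;
  }

  // Vertical distance from the point to the line.
  double Error(const Point2d& point, const Line2d& line) const override {
    return std::fabs(line.m * point.x + line.b - point.y);
  }
};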
integration_point_to_node_transformation_utility.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Michael Andre, https://github.com/msandre
//

#if !defined(KRATOS_INTEGRATION_POINT_TO_NODE_TRANSFORMATION_UTILITY_H_INCLUDED)
#define KRATOS_INTEGRATION_POINT_TO_NODE_TRANSFORMATION_UTILITY_H_INCLUDED

// System includes
#include <string>
#include <iostream>

// External includes

// Project includes
#include "includes/define.h"
#include "includes/element.h"
#include "includes/model_part.h"
#include "utilities/openmp_utils.h"

// Application includes
#include "fluid_dynamics_application_variables.h"

namespace Kratos
{
///@addtogroup FluidDynamicsApplication
///@{

///@name Kratos Classes
///@{

/**
 * @brief A utility for transforming values on integration points to nodes.
 *
 * This utility was created to transform vorticity and q-criterion variables
 * from the integration points where they are computed to the nodes for
 * visualization. The utility is designed to work in both 2D and 3D with and
 * without the MPI library. Each nodal value is computed as a weighted average
 * of the neighboring elements.
 */
template<unsigned int TDim, unsigned int TNumNodes = TDim + 1>
class IntegrationPointToNodeTransformationUtility
{
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of IntegrationPointToNodeTransformationUtility
    KRATOS_CLASS_POINTER_DEFINITION(IntegrationPointToNodeTransformationUtility);

    template<class TVariableType>
    void TransformFromIntegrationPointsToNodes(const Variable<TVariableType>& rVariable,
                                               ModelPart& rModelPart) const
    {
        // Zero the nodal value and the nodal area before accumulating.
        #pragma omp parallel
        {
            ModelPart::NodeIterator NodesBegin;
            ModelPart::NodeIterator NodesEnd;
            OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd);
            for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
            {
                itNode->FastGetSolutionStepValue(rVariable) = rVariable.Zero();
                itNode->FastGetSolutionStepValue(NODAL_AREA) = 0.0;
            }
        }

        // Accumulate volume-weighted integration-point values on the nodes.
        #pragma omp parallel
        {
            ModelPart::ElementIterator ElemBegin;
            ModelPart::ElementIterator ElemEnd;
            OpenMPUtils::PartitionedIterators(rModelPart.Elements(),ElemBegin,ElemEnd);
            std::vector<TVariableType> ValuesOnIntPoint;
            for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
            {
                const auto& r_process_info = rModelPart.GetProcessInfo();
                itElem->CalculateOnIntegrationPoints(rVariable,ValuesOnIntPoint,r_process_info);
                Element::GeometryType& rGeom = itElem->GetGeometry();
                const double Weight = rGeom.Volume() / (double) TNumNodes;
                for (unsigned int iNode = 0; iNode < rGeom.size(); iNode++)
                {
                    rGeom[iNode].SetLock();
                    rGeom[iNode].FastGetSolutionStepValue(rVariable) += Weight * ValuesOnIntPoint[0];
                    rGeom[iNode].FastGetSolutionStepValue(NODAL_AREA) += Weight;
                    rGeom[iNode].UnSetLock();
                }
            }
        }

        rModelPart.GetCommunicator().AssembleCurrentData(rVariable);
        rModelPart.GetCommunicator().AssembleCurrentData(NODAL_AREA);

        // Divide the accumulated value by the accumulated area.
        #pragma omp parallel
        {
            ModelPart::NodeIterator NodesBegin;
            ModelPart::NodeIterator NodesEnd;
            OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd);
            for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
            {
                const double NodalArea = itNode->FastGetSolutionStepValue(NODAL_AREA);
                itNode->FastGetSolutionStepValue(rVariable) /= NodalArea;
            }
        }
    }
}; // class IntegrationPointToNodeTransformationUtility

///@}

///@} // Fluid Dynamics Application group

} // namespace Kratos

#endif // KRATOS_INTEGRATION_POINT_TO_NODE_TRANSFORMATION_UTILITY_H_INCLUDED defined
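For reference, the accumulate-then-divide scheme above can be stated without any Kratos types. This is a standalone sketch; the mesh arrays and names here are hypothetical and not Kratos API, and it assumes every node belongs to at least one element.

#include <cstddef>
#include <vector>

// Project one scalar per element onto the nodes as a volume-weighted average.
// elem_nodes[e] lists the node ids of element e, elem_value[e] is its
// integration-point value, elem_volume[e] its volume.
std::vector<double> ProjectToNodes(const std::vector<std::vector<int>>& elem_nodes,
                                   const std::vector<double>& elem_value,
                                   const std::vector<double>& elem_volume,
                                   int num_nodes) {
  std::vector<double> nodal_value(num_nodes, 0.0);
  std::vector<double> nodal_area(num_nodes, 0.0);
  for (std::size_t e = 0; e < elem_nodes.size(); ++e) {
    // Same role as Weight = Volume() / TNumNodes in the utility above.
    const double weight = elem_volume[e] / elem_nodes[e].size();
    for (int n : elem_nodes[e]) {
      nodal_value[n] += weight * elem_value[e];
      nodal_area[n] += weight;
    }
  }
  // Normalize: each nodal value becomes a weighted average of its neighbors.
  for (int n = 0; n < num_nodes; ++n) nodal_value[n] /= nodal_area[n];
  return nodal_value;
}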
GB_unop__identity_bool_uint64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_bool_uint64
// op(A') function:  GB_unop_tran__identity_bool_uint64

// C type:   bool
// A type:   uint64_t
// cast:     bool cij = (bool) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    bool z = (bool) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    uint64_t aij = Ax [pA] ;        \
    /* Cx [pC] = op (cast (aij)) */ \
    bool z = (bool) aij ;           \
    Cx [pC] = z ;                   \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__identity_bool_uint64
(
    bool *Cx,               // Cx and Ax may be aliased
    const uint64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint64_t aij = Ax [p] ;
        bool z = (bool) aij ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__identity_bool_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
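The apply kernel above is one instantiation of a simple pattern: a flat, embarrassingly parallel cast-and-store loop over the numeric values of a matrix. A self-contained sketch of the same pattern outside of GraphBLAS (the function name is hypothetical, not SuiteSparse API):

#include <cstdint>

// Cast-and-copy uint64_t values into a bool array; same loop shape as
// GB_unop_apply__identity_bool_uint64 above, one static chunk per thread.
static void apply_identity_bool_uint64(bool *Cx, const std::uint64_t *Ax,
                                       std::int64_t anz, int nthreads) {
  std::int64_t p;
  #pragma omp parallel for num_threads(nthreads) schedule(static)
  for (p = 0; p < anz; p++) {
    Cx[p] = (Ax[p] != 0);  // the "cast" step of the unary operator
  }
}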
test_master1.c
//===-- test_master1.cc - Test the "master" construct -------------*- C -*-===//
//
// Part of the LOMP project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
//
// This file has been modified from the file
//    openmp/runtime/test/master/omp_master.c
// of the LLVM project (https://github.com/llvm/llvm-project)
// under the Apache License v2.0 with LLVM Exceptions.
//
//===----------------------------------------------------------------------===//

#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "tests.h"

int test_omp_master(void) {
  int nthreads;
  int executing_thread;

  nthreads = 0;
  executing_thread = -1;

#pragma omp parallel
  {
#pragma omp master
    {
#pragma omp atomic
      nthreads++;
      executing_thread = omp_get_thread_num();
    } /* end of master*/
  }   /* end of parallel*/
  return ((nthreads == 1) && (executing_thread == 0));
}

int main(void) {
  int i;
  int num_failed = 0;

  for (i = 0; i < REPETITIONS; i++) {
    if (!test_omp_master()) {
      num_failed++;
    }
  }
  return num_failed != 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}
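The property the test asserts (exactly one executing thread, and specifically thread 0) is what distinguishes master from single. A small illustrative sketch of the contrast, not part of the LOMP test suite:

#include <omp.h>
#include <stdio.h>

void master_vs_single(void) {
#pragma omp parallel
  {
#pragma omp master // always executed by thread 0; no implied barrier at the end
    printf("master ran on thread %d\n", omp_get_thread_num());

#pragma omp single // executed by whichever thread arrives; implied barrier unless nowait
    printf("single ran on thread %d\n", omp_get_thread_num());
  }
}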
convolution_omp.c
/********************************** * Programmer: Dimitrios Gkoulis * * Email: gkould@gmail.com * * Harokopio University of Athens * * Parallel Computing & Algorithms * * Project - Winter Semester * * Period: 2015 - 2016 * * IDE: Dec-C++ (4.9.0.2) * ***********************************/ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <omp.h> #define I_SIZE 100 // Input Size y (length) = x (length) #define H_SIZE 3 // Mask Size y (length) = x (length) #define SAVETOFILE 1 // Save to file flag /* F U N C T I O N S D E C L A R A T I O N S */ int convolution(int I[I_SIZE][I_SIZE], int h[H_SIZE][H_SIZE], int y, int x); // CALCULATE CONVOLUTION (FUNCTION) void save_to_file(int A[I_SIZE][I_SIZE]); // SAVE TO FILE (FUNCTION) /* M A I N */ int main(int argc, char *argv[]) { // Input int I[I_SIZE][I_SIZE] = { { 139, 13, 92, 251, 137, 124, 233, 91, 75, 11, 188, 184, 191, 118, 41, 228, 21, 93, 112, 51, 131, 141, 160, 76, 219, 211, 26, 241, 163, 140, 162, 195, 250, 227, 237, 217, 127, 148, 190, 89, 169, 243, 212, 135, 130, 33, 221, 234, 246, 200, 3, 120, 84, 208, 60, 152, 69, 155, 6, 34, 180, 240, 31, 99, 142, 88, 73, 201, 52, 83, 78, 186, 226, 229, 179, 214, 159, 38, 147, 101, 225, 207, 105, 167, 235, 10, 174, 98, 58, 59, 39, 61, 108, 70, 80, 9, 145, 48, 50, 44 }, { 54, 109, 78, 229, 85, 118, 164, 49, 153, 76, 165, 114, 158, 228, 112, 113, 156, 183, 254, 3, 230, 132, 198, 64, 90, 130, 223, 115, 190, 18, 201, 70, 124, 125, 245, 150, 189, 253, 143, 72, 239, 119, 33, 120, 82, 181, 251, 172, 57, 6, 71, 166, 2, 131, 15, 58, 101, 117, 176, 152, 51, 135, 180, 249, 137, 11, 216, 133, 22, 232, 204, 157, 184, 111, 202, 47, 177, 182, 147, 191, 162, 227, 175, 84, 211, 79, 142, 243, 46, 195, 94, 188, 159, 171, 29, 185, 214, 169, 20, 238 }, { 76, 188, 133, 42, 58, 128, 149, 253, 239, 114, 159, 74, 125, 105, 176, 40, 119, 198, 175, 26, 86, 20, 223, 214, 168, 204, 19, 15, 142, 80, 157, 6, 43, 232, 206, 230, 192, 242, 21, 124, 32, 217, 184, 127, 9, 189, 209, 254, 118, 112, 233, 140, 194, 144, 5, 95, 248, 181, 251, 1, 94, 73, 250, 49, 255, 85, 99, 82, 56, 236, 8, 39, 226, 69, 207, 107, 190, 171, 31, 103, 235, 154, 93, 155, 137, 14, 247, 59, 195, 132, 136, 174, 165, 3, 79, 224, 185, 151, 11, 104 }, { 92, 178, 245, 180, 115, 88, 208, 134, 32, 204, 206, 182, 50, 129, 24, 221, 90, 230, 0, 252, 176, 251, 228, 80, 244, 51, 15, 22, 17, 83, 27, 52, 146, 168, 105, 58, 31, 13, 114, 184, 57, 53, 222, 56, 153, 69, 40, 93, 65, 71, 249, 186, 158, 63, 139, 9, 229, 133, 96, 28, 211, 130, 159, 171, 140, 166, 203, 116, 121, 219, 30, 73, 161, 55, 11, 2, 46, 141, 138, 197, 238, 164, 232, 199, 109, 98, 102, 179, 86, 196, 152, 118, 216, 87, 29, 205, 72, 254, 195, 119 }, { 198, 115, 218, 111, 17, 131, 56, 139, 86, 182, 39, 157, 235, 85, 120, 180, 227, 232, 118, 109, 83, 188, 249, 103, 59, 213, 78, 55, 29, 207, 89, 72, 106, 75, 238, 28, 14, 158, 79, 7, 191, 34, 145, 236, 143, 166, 203, 151, 146, 40, 154, 160, 152, 6, 159, 81, 130, 18, 19, 161, 239, 132, 42, 171, 178, 65, 97, 123, 126, 181, 88, 174, 149, 219, 22, 195, 153, 77, 140, 200, 138, 205, 71, 122, 116, 94, 189, 61, 54, 177, 121, 48, 100, 87, 199, 231, 224, 13, 184, 164 }, { 180, 250, 104, 194, 235, 136, 2, 242, 138, 24, 209, 119, 210, 38, 155, 203, 251, 248, 149, 118, 247, 19, 230, 44, 1, 17, 202, 69, 191, 106, 131, 238, 43, 243, 221, 47, 100, 123, 222, 90, 253, 6, 246, 84, 229, 193, 228, 255, 142, 18, 133, 154, 132, 101, 3, 48, 117, 165, 205, 21, 86, 156, 254, 34, 109, 50, 158, 240, 4, 75, 170, 169, 92, 80, 147, 245, 63, 77, 204, 83, 11, 49, 68, 13, 236, 130, 112, 
179, 76, 174, 167, 216, 116, 70, 88, 159, 129, 87, 22, 173 }, { 45, 99, 115, 242, 110, 189, 164, 94, 114, 183, 181, 154, 151, 91, 209, 4, 78, 198, 72, 203, 116, 188, 232, 69, 157, 136, 106, 177, 7, 245, 1, 55, 105, 75, 205, 14, 184, 210, 216, 51, 165, 250, 5, 220, 173, 231, 58, 118, 86, 130, 97, 56, 153, 61, 64, 127, 156, 140, 186, 122, 191, 132, 201, 57, 168, 26, 175, 125, 227, 162, 79, 62, 172, 120, 146, 104, 143, 174, 102, 121, 155, 17, 89, 71, 28, 96, 126, 170, 43, 63, 111, 249, 195, 207, 33, 9, 196, 108, 135, 25 }, { 189, 239, 219, 14, 226, 179, 158, 89, 111, 102, 87, 59, 6, 142, 62, 53, 30, 1, 136, 180, 120, 133, 5, 190, 162, 90, 36, 94, 191, 241, 11, 24, 58, 116, 103, 38, 143, 170, 74, 152, 188, 65, 48, 176, 249, 145, 251, 29, 245, 3, 15, 52, 92, 19, 124, 97, 139, 236, 151, 221, 27, 68, 72, 193, 140, 244, 225, 26, 0, 115, 206, 232, 131, 35, 69, 4, 98, 197, 215, 113, 246, 172, 42, 93, 196, 178, 216, 213, 12, 198, 211, 71, 203, 122, 218, 167, 148, 242, 223, 163 }, { 172, 216, 56, 50, 250, 182, 79, 113, 132, 130, 83, 30, 179, 213, 149, 37, 196, 4, 19, 73, 20, 99, 251, 34, 173, 185, 71, 74, 158, 84, 162, 86, 26, 141, 112, 120, 106, 62, 63, 40, 204, 38, 33, 49, 118, 111, 226, 138, 124, 2, 80, 201, 41, 247, 36, 234, 150, 61, 85, 253, 110, 176, 171, 167, 224, 170, 188, 151, 193, 1, 115, 145, 109, 184, 232, 223, 144, 51, 187, 43, 48, 94, 65, 215, 127, 135, 29, 248, 212, 221, 235, 202, 72, 67, 131, 52, 21, 210, 23, 92 }, { 9, 211, 28, 182, 226, 206, 13, 189, 167, 125, 183, 177, 104, 180, 221, 173, 53, 151, 11, 40, 4, 45, 88, 209, 143, 95, 247, 215, 69, 158, 46, 37, 126, 196, 92, 244, 103, 201, 141, 10, 121, 179, 112, 56, 223, 98, 34, 87, 222, 120, 102, 156, 90, 153, 64, 22, 140, 75, 216, 57, 99, 44, 139, 229, 17, 163, 131, 255, 157, 5, 24, 200, 67, 248, 114, 249, 164, 191, 117, 224, 55, 52, 32, 89, 49, 38, 218, 73, 193, 148, 101, 59, 252, 159, 155, 178, 192, 169, 186, 107 }, { 94, 16, 122, 204, 188, 55, 238, 39, 173, 85, 7, 79, 0, 44, 234, 159, 243, 250, 246, 33, 236, 175, 25, 58, 57, 24, 182, 150, 224, 218, 249, 70, 103, 242, 194, 15, 132, 74, 36, 52, 232, 255, 136, 212, 87, 23, 115, 252, 59, 206, 30, 130, 195, 111, 149, 26, 251, 64, 50, 80, 139, 104, 223, 189, 17, 69, 29, 46, 138, 193, 38, 62, 47, 186, 35, 133, 49, 207, 214, 166, 145, 183, 169, 253, 8, 220, 235, 228, 73, 248, 200, 165, 164, 2, 6, 101, 37, 88, 140, 14 }, { 118, 139, 179, 58, 148, 215, 245, 227, 32, 87, 206, 200, 128, 36, 230, 183, 216, 201, 69, 41, 210, 110, 39, 173, 74, 37, 140, 137, 154, 223, 244, 164, 246, 101, 145, 12, 138, 172, 16, 43, 180, 8, 255, 232, 247, 21, 238, 249, 20, 100, 211, 104, 240, 239, 83, 14, 10, 151, 160, 22, 105, 78, 33, 199, 103, 116, 212, 68, 96, 55, 102, 80, 49, 135, 217, 233, 98, 189, 9, 202, 108, 144, 111, 177, 59, 159, 153, 0, 48, 196, 3, 112, 147, 155, 134, 191, 60, 143, 146, 150 }, { 123, 115, 207, 198, 232, 170, 92, 129, 58, 62, 169, 135, 29, 69, 11, 51, 70, 174, 111, 184, 20, 200, 30, 239, 244, 89, 212, 154, 42, 34, 83, 6, 44, 52, 31, 195, 160, 176, 53, 94, 71, 156, 102, 163, 56, 139, 65, 90, 225, 243, 150, 84, 60, 72, 179, 192, 101, 12, 146, 229, 100, 218, 109, 48, 223, 122, 248, 78, 33, 140, 238, 190, 167, 158, 241, 3, 133, 35, 205, 38, 242, 49, 250, 157, 177, 32, 209, 172, 142, 159, 219, 240, 247, 204, 82, 217, 77, 221, 185, 253 }, { 209, 236, 108, 235, 93, 205, 146, 203, 152, 154, 158, 77, 89, 175, 38, 241, 242, 248, 160, 111, 163, 170, 147, 208, 110, 244, 50, 75, 62, 189, 181, 104, 88, 195, 134, 143, 253, 153, 218, 135, 144, 92, 127, 204, 166, 174, 219, 162, 2, 115, 30, 114, 151, 55, 
24, 149, 70, 15, 82, 145, 106, 124, 61, 179, 96, 107, 112, 148, 45, 165, 243, 29, 183, 250, 187, 254, 100, 225, 25, 102, 198, 180, 185, 109, 21, 41, 126, 150, 226, 20, 238, 119, 159, 245, 217, 233, 86, 43, 184, 211 }, { 81, 194, 56, 92, 223, 110, 203, 42, 95, 122, 255, 101, 2, 117, 22, 185, 159, 214, 25, 215, 127, 227, 176, 136, 16, 7, 15, 84, 188, 23, 213, 245, 130, 182, 254, 175, 156, 106, 243, 118, 59, 28, 149, 228, 27, 99, 60, 6, 250, 237, 49, 216, 140, 102, 248, 48, 121, 74, 231, 89, 181, 162, 148, 252, 119, 180, 3, 239, 0, 76, 9, 179, 105, 224, 172, 57, 77, 86, 31, 218, 64, 79, 146, 145, 192, 38, 111, 167, 35, 154, 229, 58, 124, 199, 155, 246, 109, 125, 62, 39 }, { 191, 22, 41, 53, 27, 11, 168, 52, 101, 18, 145, 132, 36, 93, 121, 102, 199, 180, 33, 99, 67, 63, 200, 165, 59, 24, 136, 92, 198, 207, 23, 106, 73, 196, 118, 238, 228, 94, 176, 21, 221, 250, 96, 231, 20, 39, 185, 76, 151, 245, 61, 249, 26, 77, 240, 128, 113, 242, 135, 236, 42, 156, 83, 44, 129, 10, 17, 218, 169, 19, 212, 224, 50, 127, 211, 192, 115, 7, 157, 6, 55, 15, 46, 248, 237, 220, 105, 166, 109, 107, 148, 170, 159, 114, 160, 86, 215, 230, 183, 252 }, { 91, 182, 176, 165, 101, 30, 88, 1, 7, 239, 120, 77, 207, 138, 107, 150, 16, 124, 156, 130, 229, 81, 115, 83, 118, 151, 210, 213, 119, 11, 29, 131, 187, 127, 129, 97, 70, 186, 35, 236, 95, 117, 102, 3, 143, 240, 0, 198, 108, 22, 215, 19, 53, 112, 45, 42, 214, 32, 73, 202, 6, 89, 164, 116, 72, 188, 59, 157, 25, 14, 126, 33, 252, 163, 234, 178, 71, 248, 179, 41, 109, 228, 46, 121, 154, 44, 85, 167, 155, 15, 136, 142, 103, 79, 48, 253, 105, 84, 78, 87 }, { 14, 76, 159, 62, 67, 180, 39, 86, 61, 170, 155, 187, 34, 64, 109, 87, 183, 21, 84, 70, 201, 130, 117, 29, 36, 27, 161, 221, 114, 234, 208, 73, 44, 231, 97, 60, 230, 215, 181, 207, 243, 237, 128, 17, 251, 124, 252, 126, 5, 9, 168, 55, 37, 93, 160, 222, 30, 112, 174, 144, 65, 131, 211, 203, 85, 175, 119, 52, 89, 16, 0, 188, 120, 156, 157, 164, 152, 48, 66, 57, 22, 218, 241, 205, 151, 1, 15, 240, 80, 94, 45, 228, 206, 204, 118, 173, 150, 74, 106, 41 }, { 11, 33, 44, 45, 78, 25, 60, 130, 62, 195, 197, 114, 117, 233, 116, 230, 121, 194, 138, 8, 246, 199, 208, 53, 203, 242, 236, 150, 157, 212, 107, 224, 140, 153, 243, 170, 56, 143, 168, 235, 251, 5, 92, 7, 9, 122, 160, 148, 71, 29, 186, 155, 104, 141, 202, 207, 215, 248, 253, 31, 85, 196, 169, 35, 135, 146, 58, 154, 216, 231, 39, 34, 227, 28, 180, 97, 179, 87, 63, 41, 105, 70, 252, 118, 144, 225, 0, 37, 172, 67, 139, 219, 220, 149, 185, 136, 128, 159, 244, 250 }, { 124, 22, 177, 103, 216, 97, 186, 42, 143, 225, 125, 134, 153, 1, 71, 172, 230, 117, 190, 213, 82, 38, 205, 68, 215, 250, 13, 12, 131, 226, 237, 11, 148, 123, 67, 107, 146, 49, 56, 28, 61, 100, 142, 152, 55, 79, 206, 39, 191, 135, 204, 171, 109, 16, 129, 232, 127, 35, 234, 162, 3, 47, 203, 244, 202, 175, 130, 179, 6, 87, 84, 207, 121, 247, 233, 53, 137, 120, 160, 140, 180, 245, 31, 72, 102, 188, 166, 193, 183, 43, 253, 40, 89, 65, 4, 30, 0, 85, 106, 150 }, { 220, 245, 128, 195, 152, 233, 187, 122, 41, 196, 63, 151, 24, 52, 142, 237, 181, 44, 143, 218, 215, 211, 235, 106, 75, 97, 83, 118, 251, 90, 101, 68, 23, 92, 209, 184, 7, 236, 61, 162, 219, 253, 250, 159, 64, 139, 46, 246, 224, 157, 116, 134, 188, 174, 252, 103, 135, 197, 173, 45, 42, 25, 255, 129, 56, 193, 55, 247, 141, 115, 199, 2, 154, 200, 254, 126, 191, 147, 206, 194, 105, 161, 34, 222, 88, 185, 32, 148, 179, 145, 112, 0, 33, 221, 225, 146, 53, 77, 20, 153 }, { 144, 27, 6, 29, 85, 42, 179, 176, 200, 198, 161, 58, 46, 149, 51, 160, 252, 59, 89, 238, 132, 246, 
165, 172, 151, 215, 55, 240, 61, 131, 229, 116, 212, 30, 145, 150, 194, 105, 104, 141, 99, 196, 130, 169, 204, 137, 60, 234, 20, 216, 114, 156, 48, 90, 110, 9, 118, 112, 120, 82, 164, 250, 75, 38, 171, 255, 79, 1, 192, 126, 2, 193, 76, 54, 37, 111, 226, 69, 109, 22, 236, 254, 122, 233, 47, 136, 138, 189, 32, 227, 7, 139, 129, 230, 184, 117, 67, 103, 124, 15 }, { 153, 152, 134, 24, 92, 192, 86, 106, 28, 11, 119, 208, 103, 143, 109, 198, 135, 18, 48, 44, 83, 61, 118, 64, 0, 120, 104, 75, 231, 52, 95, 193, 157, 214, 240, 252, 255, 46, 161, 128, 146, 30, 147, 126, 200, 58, 164, 243, 191, 131, 56, 182, 150, 145, 39, 82, 180, 237, 123, 105, 163, 7, 175, 77, 139, 241, 206, 225, 155, 31, 8, 242, 154, 3, 89, 132, 141, 54, 99, 235, 34, 186, 36, 102, 68, 223, 14, 67, 121, 63, 188, 203, 159, 179, 209, 122, 215, 246, 171, 124 }, { 180, 244, 141, 119, 65, 11, 46, 240, 38, 2, 231, 163, 222, 234, 113, 31, 79, 183, 48, 145, 60, 55, 42, 144, 92, 102, 224, 218, 7, 203, 159, 158, 44, 176, 196, 59, 26, 182, 106, 97, 200, 212, 170, 51, 219, 127, 186, 206, 64, 174, 235, 137, 167, 111, 162, 230, 213, 19, 74, 12, 133, 28, 136, 69, 88, 112, 147, 245, 173, 223, 29, 34, 10, 198, 99, 216, 148, 18, 160, 117, 197, 53, 187, 58, 232, 54, 9, 45, 100, 166, 195, 109, 155, 143, 50, 128, 150, 84, 194, 8 }, { 175, 177, 59, 162, 129, 146, 219, 106, 84, 168, 30, 225, 62, 153, 22, 98, 220, 247, 42, 40, 110, 189, 197, 236, 142, 60, 141, 233, 51, 240, 94, 97, 53, 164, 74, 31, 132, 179, 139, 226, 248, 200, 156, 207, 244, 208, 121, 227, 23, 221, 118, 103, 64, 204, 136, 192, 34, 217, 229, 33, 127, 89, 211, 7, 66, 145, 28, 245, 83, 214, 180, 222, 199, 172, 76, 193, 56, 249, 195, 87, 243, 216, 18, 166, 115, 50, 223, 19, 203, 176, 57, 239, 120, 152, 252, 144, 242, 67, 235, 63 }, { 12, 147, 246, 157, 66, 136, 76, 41, 141, 121, 241, 20, 6, 227, 143, 107, 37, 210, 83, 154, 85, 109, 133, 44, 48, 10, 77, 1, 245, 108, 213, 194, 138, 217, 40, 160, 253, 65, 103, 226, 184, 169, 206, 239, 17, 130, 225, 251, 211, 32, 23, 149, 172, 179, 93, 254, 191, 222, 74, 127, 139, 248, 205, 16, 54, 110, 152, 7, 201, 161, 24, 96, 204, 81, 207, 236, 18, 192, 56, 71, 100, 104, 86, 84, 250, 237, 19, 3, 90, 195, 228, 45, 8, 94, 5, 166, 30, 144, 212, 72 }, { 185, 34, 56, 115, 210, 71, 7, 38, 160, 197, 196, 254, 73, 241, 242, 112, 201, 87, 136, 187, 198, 101, 21, 164, 177, 36, 176, 221, 168, 43, 25, 128, 170, 93, 86, 150, 191, 156, 179, 233, 64, 29, 240, 190, 5, 91, 158, 72, 23, 245, 223, 44, 208, 123, 237, 200, 74, 40, 153, 141, 205, 209, 246, 51, 138, 203, 47, 239, 247, 17, 131, 222, 121, 37, 120, 2, 161, 117, 135, 193, 192, 81, 99, 204, 0, 80, 67, 213, 9, 31, 219, 59, 148, 188, 243, 165, 214, 146, 52, 60 }, { 116, 135, 66, 227, 138, 149, 211, 93, 163, 200, 104, 85, 155, 108, 38, 8, 51, 82, 216, 21, 251, 126, 196, 154, 185, 76, 183, 249, 112, 64, 225, 62, 9, 181, 125, 61, 240, 63, 114, 16, 134, 221, 214, 80, 56, 30, 190, 59, 224, 18, 102, 27, 152, 219, 151, 31, 115, 83, 79, 50, 3, 188, 122, 177, 206, 231, 229, 39, 49, 32, 218, 180, 98, 145, 143, 255, 235, 133, 92, 67, 26, 230, 43, 203, 212, 91, 168, 44, 186, 205, 48, 144, 160, 150, 167, 74, 136, 173, 94, 195 }, { 173, 152, 185, 206, 116, 218, 248, 59, 147, 92, 33, 176, 45, 226, 232, 192, 80, 113, 242, 63, 102, 67, 181, 60, 8, 84, 203, 69, 150, 187, 228, 177, 0, 167, 201, 190, 128, 239, 157, 196, 165, 110, 78, 86, 37, 62, 76, 156, 174, 194, 253, 182, 238, 126, 212, 236, 106, 98, 39, 195, 48, 180, 131, 22, 223, 18, 55, 108, 77, 24, 47, 12, 233, 205, 251, 5, 127, 217, 44, 244, 164, 11, 38, 1, 130, 34, 52, 249, 120, 49, 
136, 112, 183, 42, 230, 240, 43, 186, 90, 124 }, { 142, 226, 115, 169, 243, 50, 129, 136, 196, 188, 102, 1, 48, 4, 59, 203, 75, 125, 98, 215, 15, 95, 210, 80, 232, 168, 223, 254, 69, 239, 175, 84, 87, 225, 34, 28, 208, 62, 66, 77, 122, 97, 146, 25, 58, 30, 147, 42, 153, 24, 163, 119, 165, 107, 199, 6, 41, 202, 174, 23, 124, 205, 19, 81, 255, 143, 167, 180, 2, 154, 214, 151, 121, 171, 126, 113, 123, 60, 85, 184, 96, 104, 155, 190, 200, 57, 187, 245, 182, 73, 89, 16, 185, 191, 159, 71, 128, 249, 40, 217 }, { 137, 59, 75, 114, 131, 248, 48, 39, 88, 203, 123, 3, 103, 98, 115, 153, 180, 234, 194, 85, 141, 16, 187, 108, 41, 8, 63, 4, 163, 25, 193, 160, 185, 32, 135, 116, 207, 201, 182, 168, 215, 140, 179, 143, 208, 238, 19, 214, 162, 247, 191, 254, 34, 161, 27, 11, 159, 239, 249, 229, 158, 101, 128, 0, 20, 53, 122, 232, 112, 183, 237, 155, 144, 236, 43, 55, 223, 198, 125, 83, 118, 233, 13, 250, 136, 149, 197, 184, 51, 69, 139, 190, 92, 132, 36, 65, 56, 93, 33, 47 }, { 2, 37, 157, 183, 168, 59, 10, 113, 7, 199, 26, 219, 127, 134, 181, 0, 162, 44, 159, 245, 105, 69, 148, 173, 13, 82, 214, 16, 169, 165, 255, 21, 135, 58, 50, 170, 176, 205, 22, 191, 115, 41, 241, 139, 51, 244, 36, 182, 109, 80, 94, 224, 243, 11, 151, 62, 178, 254, 203, 251, 247, 18, 198, 86, 23, 221, 73, 144, 76, 20, 79, 154, 72, 33, 15, 64, 39, 32, 8, 202, 129, 249, 29, 104, 225, 52, 78, 85, 28, 236, 114, 77, 210, 108, 142, 201, 27, 106, 250, 57 }, { 144, 250, 154, 8, 68, 10, 131, 222, 198, 246, 71, 143, 126, 227, 78, 172, 233, 155, 62, 220, 204, 91, 237, 2, 61, 6, 16, 98, 105, 145, 52, 152, 177, 219, 191, 133, 149, 48, 252, 208, 51, 211, 99, 207, 56, 229, 203, 114, 195, 96, 35, 102, 248, 130, 37, 251, 80, 38, 146, 205, 231, 196, 33, 26, 174, 181, 218, 139, 254, 118, 66, 41, 55, 87, 223, 202, 184, 175, 53, 125, 111, 110, 17, 141, 106, 164, 79, 166, 238, 163, 100, 221, 49, 74, 3, 234, 59, 228, 189, 31 }, { 95, 22, 152, 75, 121, 117, 3, 91, 66, 168, 112, 243, 27, 217, 122, 176, 98, 53, 43, 131, 251, 96, 62, 44, 242, 36, 115, 236, 202, 60, 161, 47, 129, 46, 119, 238, 2, 224, 154, 138, 229, 118, 190, 208, 125, 180, 197, 71, 35, 219, 38, 223, 63, 204, 31, 58, 68, 136, 23, 107, 194, 37, 234, 114, 169, 82, 1, 185, 150, 42, 227, 10, 120, 244, 165, 231, 157, 80, 147, 12, 226, 128, 205, 159, 78, 4, 175, 26, 188, 148, 109, 106, 248, 32, 99, 29, 93, 225, 81, 149 }, { 148, 96, 11, 4, 189, 64, 146, 75, 18, 106, 234, 114, 88, 204, 214, 209, 160, 19, 22, 250, 83, 169, 105, 153, 110, 152, 17, 233, 206, 74, 131, 228, 103, 90, 144, 130, 94, 35, 185, 102, 240, 61, 92, 168, 27, 203, 86, 197, 190, 182, 68, 162, 172, 166, 79, 95, 236, 87, 89, 119, 66, 71, 175, 229, 33, 217, 205, 163, 101, 194, 244, 57, 134, 154, 76, 38, 104, 136, 8, 15, 97, 150, 237, 31, 107, 158, 6, 159, 51, 174, 220, 49, 85, 13, 42, 77, 230, 0, 113, 82 }, { 98, 200, 25, 154, 83, 125, 224, 146, 30, 128, 188, 18, 238, 104, 96, 72, 144, 160, 43, 91, 131, 121, 107, 73, 100, 230, 42, 126, 223, 111, 19, 218, 80, 174, 178, 87, 202, 94, 34, 76, 136, 92, 179, 182, 159, 60, 152, 59, 247, 70, 41, 145, 197, 209, 236, 17, 90, 219, 52, 210, 21, 233, 93, 176, 251, 220, 194, 28, 133, 189, 95, 190, 106, 208, 99, 11, 212, 240, 109, 10, 62, 185, 217, 157, 1, 201, 84, 134, 69, 47, 161, 129, 158, 27, 54, 193, 46, 135, 221, 22 }, { 108, 249, 83, 178, 181, 191, 182, 23, 105, 241, 213, 232, 146, 211, 113, 123, 9, 128, 59, 32, 51, 104, 114, 61, 159, 189, 107, 28, 78, 137, 35, 165, 176, 12, 180, 112, 148, 24, 76, 81, 120, 101, 102, 71, 22, 44, 223, 85, 228, 77, 205, 47, 84, 136, 143, 65, 88, 172, 253, 40, 225, 
239, 226, 244, 169, 125, 215, 103, 16, 162, 231, 217, 197, 115, 91, 73, 66, 224, 230, 171, 7, 6, 204, 140, 147, 208, 48, 175, 164, 60, 157, 74, 117, 154, 129, 43, 173, 156, 219, 96 }, { 170, 114, 209, 3, 199, 40, 153, 4, 54, 108, 123, 215, 22, 135, 141, 173, 203, 207, 88, 110, 221, 79, 146, 99, 254, 124, 240, 120, 104, 132, 211, 78, 151, 100, 38, 83, 11, 206, 150, 138, 64, 175, 52, 159, 48, 177, 247, 205, 81, 50, 219, 41, 200, 232, 230, 184, 224, 213, 0, 37, 82, 46, 17, 239, 93, 156, 87, 139, 241, 152, 58, 250, 208, 102, 178, 85, 7, 245, 23, 6, 9, 128, 130, 194, 169, 74, 21, 59, 166, 210, 220, 69, 61, 147, 77, 204, 243, 105, 89, 101 }, { 147, 254, 53, 173, 239, 189, 142, 225, 192, 47, 109, 68, 155, 93, 57, 4, 135, 56, 27, 108, 31, 58, 251, 194, 154, 15, 88, 227, 128, 235, 67, 210, 46, 172, 157, 242, 22, 188, 171, 130, 131, 166, 52, 19, 11, 200, 24, 110, 123, 62, 161, 55, 217, 222, 29, 95, 167, 231, 238, 21, 42, 229, 246, 1, 76, 101, 203, 237, 241, 74, 216, 84, 80, 119, 48, 105, 253, 71, 14, 116, 72, 178, 205, 226, 60, 117, 138, 9, 153, 91, 136, 99, 182, 118, 0, 139, 159, 66, 212, 35 }, { 193, 62, 172, 123, 94, 253, 213, 241, 97, 104, 160, 131, 252, 158, 31, 198, 181, 205, 201, 25, 163, 173, 8, 95, 164, 233, 6, 91, 41, 229, 215, 254, 14, 222, 105, 117, 176, 137, 65, 211, 80, 135, 177, 68, 157, 168, 18, 103, 28, 47, 110, 99, 69, 239, 81, 40, 132, 149, 183, 169, 7, 190, 75, 21, 143, 44, 246, 166, 50, 237, 42, 78, 151, 9, 226, 46, 4, 54, 203, 214, 101, 53, 240, 189, 152, 34, 243, 88, 186, 3, 126, 43, 84, 180, 73, 154, 2, 11, 55, 15 }, { 19, 45, 212, 9, 88, 194, 252, 121, 203, 27, 109, 117, 224, 20, 195, 127, 130, 223, 66, 34, 169, 233, 216, 159, 93, 180, 98, 92, 99, 10, 210, 90, 102, 191, 94, 225, 181, 108, 245, 142, 238, 103, 192, 237, 228, 24, 22, 35, 6, 176, 46, 187, 201, 250, 214, 33, 87, 111, 172, 186, 126, 236, 57, 62, 80, 31, 145, 232, 124, 122, 104, 134, 240, 14, 70, 217, 48, 199, 30, 21, 75, 43, 254, 106, 85, 41, 207, 234, 60, 110, 56, 119, 36, 50, 219, 167, 178, 189, 183, 200 }, { 250, 17, 33, 157, 231, 245, 227, 156, 160, 30, 88, 63, 199, 161, 78, 218, 138, 12, 1, 191, 244, 208, 62, 77, 54, 120, 221, 252, 222, 176, 10, 164, 89, 81, 184, 76, 144, 107, 99, 40, 246, 47, 67, 79, 48, 251, 13, 183, 249, 45, 44, 32, 97, 121, 152, 159, 38, 132, 20, 189, 9, 243, 73, 140, 168, 235, 142, 26, 216, 35, 65, 225, 146, 209, 58, 217, 8, 151, 83, 3, 125, 93, 22, 34, 233, 167, 181, 106, 55, 213, 24, 4, 112, 197, 149, 177, 23, 133, 198, 39 }, { 100, 98, 70, 253, 150, 241, 42, 165, 88, 123, 144, 30, 169, 220, 138, 80, 90, 234, 106, 189, 207, 26, 109, 47, 247, 224, 164, 163, 214, 61, 40, 217, 79, 0, 167, 200, 92, 134, 16, 112, 255, 45, 91, 44, 181, 56, 125, 53, 152, 71, 186, 81, 37, 19, 118, 97, 196, 38, 129, 76, 120, 133, 104, 124, 25, 151, 162, 35, 140, 111, 34, 12, 22, 48, 7, 108, 13, 121, 122, 205, 136, 46, 132, 119, 130, 87, 147, 105, 197, 160, 23, 248, 1, 2, 171, 168, 58, 222, 116, 17 }, { 109, 247, 174, 252, 133, 19, 192, 97, 132, 80, 65, 13, 56, 54, 179, 74, 82, 204, 157, 146, 127, 92, 211, 238, 234, 235, 116, 53, 202, 130, 32, 180, 138, 131, 232, 230, 87, 38, 175, 0, 44, 67, 51, 35, 73, 220, 187, 151, 186, 22, 49, 139, 148, 94, 71, 69, 3, 24, 79, 195, 95, 218, 15, 200, 150, 244, 128, 91, 39, 88, 173, 46, 165, 228, 194, 199, 36, 221, 77, 14, 31, 1, 125, 41, 152, 193, 7, 61, 63, 254, 223, 154, 189, 159, 210, 231, 144, 47, 250, 112 }, { 115, 55, 161, 107, 19, 59, 250, 93, 37, 31, 94, 42, 154, 87, 232, 243, 123, 244, 136, 34, 105, 43, 78, 165, 64, 75, 100, 140, 30, 7, 109, 69, 224, 168, 195, 135, 
236, 174, 246, 210, 52, 145, 95, 157, 119, 110, 204, 197, 207, 211, 238, 192, 249, 237, 213, 62, 89, 181, 217, 146, 158, 39, 104, 68, 81, 70, 202, 90, 169, 159, 172, 80, 227, 57, 223, 255, 186, 1, 101, 44, 193, 98, 36, 54, 144, 24, 61, 96, 82, 88, 139, 183, 245, 184, 2, 216, 58, 177, 130, 120 }, { 196, 90, 73, 97, 177, 17, 216, 112, 88, 48, 27, 184, 119, 41, 254, 182, 29, 244, 28, 136, 118, 222, 89, 120, 213, 246, 253, 167, 190, 101, 43, 243, 130, 51, 68, 61, 192, 170, 19, 84, 189, 127, 47, 181, 229, 132, 55, 252, 52, 235, 150, 36, 67, 147, 164, 114, 6, 74, 121, 25, 71, 54, 228, 211, 66, 146, 4, 185, 11, 168, 35, 2, 205, 70, 203, 7, 126, 178, 78, 172, 141, 131, 180, 206, 72, 245, 86, 116, 46, 1, 144, 145, 106, 225, 76, 165, 215, 156, 91, 239 }, { 26, 219, 54, 140, 31, 17, 114, 58, 220, 222, 93, 238, 254, 60, 102, 115, 226, 236, 185, 53, 192, 245, 112, 244, 101, 247, 13, 171, 32, 160, 121, 174, 197, 87, 193, 45, 255, 77, 156, 78, 214, 126, 144, 148, 175, 196, 215, 252, 216, 40, 184, 163, 66, 218, 251, 168, 21, 199, 118, 23, 82, 3, 37, 104, 145, 154, 179, 46, 165, 0, 85, 237, 195, 106, 100, 176, 9, 38, 187, 42, 35, 5, 213, 68, 88, 56, 84, 183, 134, 34, 15, 227, 191, 130, 12, 69, 240, 72, 55, 108 }, { 99, 51, 145, 147, 229, 119, 188, 204, 49, 160, 168, 97, 118, 23, 130, 22, 114, 30, 15, 42, 9, 74, 186, 57, 245, 140, 116, 177, 136, 81, 87, 50, 88, 173, 159, 228, 174, 222, 53, 94, 163, 83, 241, 139, 121, 75, 25, 150, 73, 137, 233, 107, 158, 132, 240, 227, 251, 180, 3, 209, 206, 242, 216, 244, 109, 89, 135, 117, 112, 185, 33, 190, 176, 214, 7, 1, 35, 67, 129, 54, 189, 201, 217, 223, 179, 120, 154, 208, 105, 232, 62, 90, 19, 252, 191, 77, 10, 205, 225, 181 }, { 192, 1, 218, 13, 73, 72, 66, 231, 255, 224, 70, 4, 195, 97, 53, 108, 112, 77, 194, 42, 138, 12, 191, 71, 225, 181, 196, 102, 9, 2, 163, 230, 56, 217, 99, 87, 227, 107, 174, 90, 61, 166, 153, 142, 91, 106, 183, 236, 253, 44, 36, 157, 175, 41, 3, 121, 250, 50, 10, 204, 86, 170, 245, 132, 96, 27, 152, 209, 219, 34, 78, 64, 156, 141, 28, 164, 39, 30, 226, 134, 147, 129, 40, 161, 244, 252, 68, 187, 201, 169, 229, 81, 76, 159, 58, 172, 52, 32, 43, 35 }, { 128, 253, 105, 166, 24, 135, 200, 252, 159, 34, 231, 230, 44, 205, 111, 160, 228, 226, 184, 221, 27, 213, 202, 19, 8, 132, 171, 212, 152, 23, 25, 18, 20, 247, 50, 82, 251, 62, 86, 124, 236, 48, 99, 74, 129, 173, 144, 4, 179, 101, 67, 137, 234, 92, 116, 29, 118, 46, 170, 161, 193, 66, 157, 43, 224, 102, 106, 94, 175, 198, 96, 5, 28, 219, 1, 223, 145, 151, 121, 71, 185, 57, 90, 164, 38, 199, 216, 218, 222, 188, 9, 15, 79, 65, 100, 31, 155, 32, 108, 21 }, { 238, 79, 135, 55, 250, 160, 21, 202, 224, 51, 139, 92, 140, 111, 234, 205, 68, 200, 115, 197, 130, 170, 89, 35, 84, 239, 113, 157, 54, 153, 226, 91, 99, 180, 133, 175, 17, 103, 13, 29, 1, 129, 42, 228, 32, 66, 50, 240, 206, 143, 19, 15, 131, 104, 196, 75, 25, 230, 209, 142, 210, 220, 145, 30, 57, 246, 178, 39, 182, 10, 98, 85, 154, 107, 77, 56, 144, 179, 61, 109, 156, 27, 100, 71, 110, 231, 198, 136, 38, 44, 177, 229, 45, 161, 195, 60, 102, 33, 95, 36 }, { 125, 133, 30, 228, 232, 66, 31, 82, 196, 249, 242, 103, 3, 211, 42, 152, 10, 90, 247, 146, 245, 158, 39, 151, 134, 159, 176, 208, 165, 32, 116, 84, 0, 206, 98, 12, 91, 130, 119, 57, 224, 144, 150, 56, 251, 80, 109, 14, 43, 118, 252, 33, 233, 213, 148, 81, 207, 189, 156, 230, 236, 136, 145, 117, 62, 89, 8, 238, 226, 219, 76, 174, 220, 44, 183, 202, 94, 199, 68, 13, 28, 157, 178, 185, 131, 79, 41, 6, 227, 210, 129, 172, 17, 167, 52, 100, 248, 177, 85, 1 }, { 171, 88, 69, 71, 5, 137, 235, 59, 
68, 174, 185, 151, 168, 16, 231, 144, 241, 113, 38, 230, 244, 165, 80, 253, 20, 156, 125, 107, 106, 245, 211, 146, 210, 212, 22, 37, 200, 62, 150, 209, 110, 169, 164, 124, 53, 51, 254, 41, 11, 94, 50, 197, 74, 223, 255, 138, 35, 109, 145, 189, 195, 242, 175, 208, 198, 251, 64, 27, 161, 12, 7, 204, 123, 142, 214, 108, 128, 40, 134, 194, 120, 243, 148, 188, 33, 77, 15, 224, 79, 179, 65, 158, 54, 219, 217, 180, 173, 105, 205, 18 }, { 221, 60, 236, 11, 36, 92, 17, 119, 32, 81, 122, 253, 202, 76, 13, 206, 0, 252, 235, 229, 126, 70, 214, 234, 12, 105, 97, 218, 22, 96, 46, 226, 121, 37, 145, 167, 220, 193, 178, 25, 187, 3, 21, 49, 185, 180, 231, 195, 165, 94, 243, 107, 10, 210, 239, 39, 115, 159, 79, 15, 24, 4, 71, 139, 147, 204, 28, 188, 201, 251, 158, 175, 7, 157, 192, 240, 52, 199, 58, 62, 249, 223, 100, 217, 113, 196, 212, 33, 59, 35, 137, 112, 153, 176, 224, 216, 244, 87, 174, 73 }, { 70, 72, 119, 63, 145, 13, 47, 66, 69, 77, 237, 213, 184, 241, 112, 64, 242, 56, 234, 85, 122, 103, 211, 216, 155, 22, 111, 186, 129, 208, 193, 204, 133, 136, 148, 75, 40, 127, 137, 6, 65, 104, 100, 162, 3, 93, 158, 110, 143, 96, 12, 217, 180, 179, 255, 151, 243, 29, 38, 88, 139, 253, 152, 131, 182, 54, 149, 177, 209, 102, 232, 79, 154, 250, 132, 24, 26, 244, 176, 138, 201, 123, 53, 91, 142, 135, 121, 225, 43, 62, 76, 19, 195, 14, 187, 171, 229, 74, 254, 196 }, { 46, 209, 150, 199, 164, 38, 222, 5, 212, 4, 167, 224, 195, 16, 75, 29, 225, 182, 153, 89, 194, 191, 48, 122, 118, 111, 186, 112, 239, 90, 120, 163, 36, 116, 147, 217, 87, 181, 23, 141, 241, 145, 47, 6, 252, 117, 245, 54, 12, 40, 178, 45, 85, 220, 160, 130, 143, 73, 236, 172, 96, 80, 106, 131, 110, 221, 144, 185, 26, 244, 213, 52, 228, 174, 33, 251, 99, 30, 170, 226, 157, 19, 27, 114, 161, 0, 3, 203, 123, 230, 234, 13, 255, 24, 113, 204, 39, 66, 43, 140 }, { 82, 56, 141, 237, 76, 151, 41, 1, 51, 149, 104, 204, 33, 5, 43, 219, 17, 221, 36, 20, 71, 180, 184, 97, 6, 202, 60, 195, 192, 230, 171, 242, 193, 144, 188, 199, 244, 110, 64, 248, 22, 234, 30, 119, 84, 117, 120, 209, 66, 21, 53, 176, 249, 75, 58, 153, 178, 3, 239, 227, 27, 159, 206, 81, 175, 93, 37, 42, 136, 185, 132, 146, 122, 160, 47, 123, 96, 61, 130, 34, 50, 161, 154, 11, 255, 0, 246, 72, 78, 54, 134, 145, 194, 89, 229, 150, 88, 210, 99, 86 }, { 162, 154, 85, 140, 155, 12, 146, 9, 71, 82, 168, 107, 67, 6, 123, 226, 180, 93, 249, 83, 95, 190, 3, 252, 130, 206, 59, 55, 193, 183, 50, 233, 77, 58, 239, 156, 73, 174, 136, 161, 81, 43, 122, 26, 216, 21, 49, 133, 158, 36, 11, 176, 28, 104, 163, 129, 29, 91, 247, 145, 108, 75, 152, 113, 186, 124, 92, 244, 237, 57, 125, 119, 0, 246, 69, 166, 224, 65, 41, 118, 198, 64, 221, 14, 214, 217, 51, 112, 116, 70, 194, 192, 255, 207, 10, 19, 134, 230, 4, 173 }, { 54, 237, 68, 88, 122, 192, 35, 65, 229, 170, 113, 111, 39, 168, 211, 129, 26, 7, 1, 125, 97, 202, 182, 213, 107, 120, 164, 132, 64, 14, 20, 21, 255, 40, 121, 126, 140, 180, 177, 127, 147, 152, 25, 155, 183, 158, 212, 178, 33, 175, 13, 96, 220, 225, 187, 117, 217, 191, 53, 233, 207, 16, 2, 235, 204, 208, 32, 56, 161, 24, 137, 0, 45, 87, 116, 138, 100, 109, 150, 130, 245, 12, 105, 34, 85, 185, 93, 189, 167, 131, 3, 146, 52, 128, 228, 73, 218, 95, 136, 115 }, { 110, 39, 98, 173, 65, 240, 2, 55, 107, 185, 153, 238, 24, 120, 249, 206, 242, 127, 247, 177, 64, 176, 129, 202, 191, 140, 207, 121, 193, 74, 147, 135, 89, 167, 44, 93, 12, 15, 164, 216, 99, 3, 117, 11, 23, 124, 62, 251, 197, 250, 236, 94, 196, 63, 71, 81, 172, 241, 228, 43, 203, 32, 168, 218, 154, 132, 115, 20, 255, 144, 230, 137, 205, 183, 163, 131, 
253, 219, 235, 180, 7, 53, 220, 198, 148, 10, 215, 171, 212, 84, 151, 227, 157, 152, 95, 139, 195, 108, 18, 224 }, { 142, 83, 35, 39, 125, 90, 45, 68, 29, 19, 61, 8, 184, 173, 100, 13, 18, 62, 212, 203, 52, 181, 67, 171, 127, 49, 55, 109, 214, 224, 85, 27, 202, 189, 228, 57, 226, 92, 235, 99, 218, 227, 245, 74, 4, 191, 79, 219, 149, 103, 42, 175, 148, 14, 152, 252, 178, 156, 51, 120, 253, 114, 21, 139, 124, 122, 48, 89, 210, 117, 223, 160, 167, 244, 205, 211, 216, 140, 241, 172, 0, 217, 248, 54, 30, 146, 153, 131, 161, 230, 91, 135, 240, 10, 197, 157, 225, 44, 70, 43 }, { 182, 180, 197, 93, 43, 204, 8, 116, 234, 157, 14, 136, 168, 254, 165, 63, 154, 247, 20, 149, 139, 83, 94, 134, 140, 223, 231, 253, 220, 215, 86, 184, 251, 50, 45, 57, 33, 207, 79, 1, 105, 74, 67, 233, 179, 99, 25, 218, 24, 111, 34, 132, 72, 208, 209, 121, 32, 230, 191, 120, 31, 55, 12, 226, 246, 175, 141, 47, 114, 128, 39, 144, 127, 199, 19, 23, 161, 194, 131, 159, 187, 122, 84, 90, 133, 98, 42, 13, 28, 92, 26, 60, 135, 172, 224, 225, 227, 95, 129, 113 }, { 151, 68, 90, 227, 211, 21, 223, 184, 46, 237, 221, 82, 186, 48, 27, 17, 95, 24, 43, 86, 173, 178, 246, 5, 91, 99, 109, 245, 251, 153, 31, 47, 191, 34, 72, 214, 229, 49, 53, 176, 204, 52, 180, 45, 242, 70, 75, 50, 234, 167, 145, 241, 140, 62, 219, 76, 30, 15, 110, 124, 116, 206, 224, 205, 132, 108, 19, 253, 240, 197, 38, 166, 248, 14, 160, 26, 131, 63, 128, 199, 16, 71, 11, 28, 208, 104, 185, 201, 32, 162, 243, 146, 103, 77, 107, 0, 88, 231, 117, 230 }, { 78, 185, 212, 198, 3, 147, 144, 56, 66, 90, 238, 158, 10, 227, 73, 47, 55, 53, 207, 230, 89, 165, 217, 46, 128, 54, 219, 245, 164, 107, 248, 249, 199, 127, 203, 172, 103, 126, 131, 95, 112, 173, 186, 87, 113, 44, 41, 153, 187, 18, 134, 35, 175, 1, 142, 43, 23, 98, 67, 117, 237, 27, 40, 196, 71, 51, 64, 171, 92, 232, 160, 109, 235, 77, 69, 221, 255, 25, 241, 218, 4, 99, 180, 148, 60, 220, 8, 254, 223, 140, 111, 136, 91, 169, 81, 197, 231, 57, 30, 204 }, { 134, 30, 107, 53, 45, 19, 2, 208, 181, 67, 192, 104, 166, 214, 10, 212, 105, 162, 84, 137, 6, 131, 245, 111, 38, 204, 194, 146, 199, 226, 3, 95, 252, 23, 178, 129, 168, 228, 109, 35, 254, 230, 16, 63, 196, 155, 14, 18, 135, 240, 115, 144, 136, 238, 33, 153, 51, 255, 102, 25, 116, 121, 154, 86, 151, 69, 227, 236, 41, 232, 59, 247, 253, 219, 234, 179, 90, 26, 85, 195, 140, 12, 165, 150, 29, 183, 205, 122, 187, 114, 31, 243, 250, 163, 68, 170, 225, 66, 60, 99 }, { 88, 40, 203, 63, 114, 216, 109, 250, 117, 94, 245, 209, 47, 236, 101, 98, 62, 41, 35, 52, 113, 27, 49, 84, 50, 249, 238, 69, 70, 44, 184, 97, 252, 10, 173, 39, 33, 147, 54, 102, 25, 200, 3, 139, 24, 64, 237, 222, 78, 136, 153, 81, 60, 103, 138, 241, 17, 77, 135, 29, 7, 125, 76, 12, 181, 226, 95, 65, 174, 217, 74, 37, 104, 148, 89, 59, 150, 42, 13, 235, 234, 99, 247, 196, 243, 121, 154, 231, 36, 93, 92, 176, 210, 66, 85, 229, 46, 111, 214, 75 }, { 166, 96, 209, 239, 1, 191, 217, 251, 35, 163, 59, 116, 206, 254, 205, 148, 68, 147, 242, 143, 69, 244, 141, 22, 82, 52, 121, 118, 81, 45, 28, 2, 36, 162, 91, 90, 34, 253, 38, 131, 223, 133, 216, 207, 119, 102, 72, 225, 13, 149, 161, 39, 8, 249, 184, 114, 44, 132, 180, 227, 88, 57, 89, 109, 10, 155, 33, 201, 94, 137, 74, 20, 60, 177, 198, 220, 48, 168, 0, 212, 196, 160, 189, 150, 15, 43, 136, 4, 110, 250, 128, 230, 92, 86, 228, 12, 93, 97, 75, 197 }, { 36, 194, 255, 225, 131, 39, 34, 1, 228, 192, 14, 149, 50, 180, 140, 148, 186, 171, 248, 244, 105, 42, 98, 95, 3, 114, 99, 124, 12, 66, 107, 43, 137, 216, 5, 218, 56, 103, 198, 109, 199, 185, 133, 190, 252, 32, 224, 52, 
161, 121, 41, 213, 232, 47, 37, 227, 146, 69, 102, 196, 46, 40, 238, 178, 207, 61, 0, 147, 129, 9, 97, 251, 184, 126, 189, 67, 144, 209, 73, 93, 127, 239, 17, 54, 19, 96, 155, 85, 88, 240, 167, 182, 80, 72, 106, 71, 203, 112, 234, 177 }, { 82, 148, 169, 138, 79, 230, 88, 181, 46, 206, 3, 163, 19, 149, 67, 197, 139, 2, 30, 188, 98, 195, 246, 17, 99, 44, 5, 76, 251, 135, 214, 8, 65, 10, 34, 53, 146, 29, 37, 106, 11, 218, 249, 48, 63, 118, 176, 175, 219, 231, 157, 75, 201, 172, 145, 84, 7, 154, 178, 186, 89, 165, 49, 144, 66, 187, 182, 221, 23, 180, 253, 220, 202, 112, 216, 72, 244, 200, 247, 228, 105, 173, 9, 52, 45, 151, 147, 96, 71, 199, 87, 97, 143, 236, 56, 6, 102, 229, 81, 194 }, { 79, 115, 139, 50, 65, 44, 116, 14, 49, 90, 67, 209, 224, 2, 59, 104, 69, 199, 206, 119, 136, 212, 21, 211, 190, 164, 31, 137, 55, 204, 177, 186, 197, 195, 86, 74, 92, 33, 85, 239, 118, 158, 196, 237, 36, 57, 220, 11, 149, 62, 30, 169, 180, 188, 82, 28, 181, 95, 189, 251, 122, 100, 253, 191, 94, 120, 89, 106, 15, 179, 138, 200, 227, 159, 39, 155, 77, 34, 63, 29, 1, 223, 175, 96, 216, 245, 46, 142, 140, 153, 201, 8, 124, 194, 35, 20, 152, 87, 109, 187 }, { 165, 45, 144, 43, 145, 174, 207, 92, 137, 161, 196, 203, 79, 182, 160, 113, 30, 140, 110, 236, 117, 179, 173, 50, 49, 139, 199, 70, 123, 175, 130, 163, 82, 183, 138, 16, 168, 176, 237, 55, 46, 5, 212, 189, 128, 220, 11, 35, 116, 9, 27, 230, 248, 91, 17, 36, 114, 221, 6, 171, 104, 208, 206, 131, 8, 214, 111, 213, 141, 125, 219, 146, 14, 227, 75, 194, 102, 136, 101, 172, 157, 251, 33, 85, 135, 64, 167, 222, 119, 159, 37, 122, 3, 255, 71, 41, 240, 204, 201, 93 }, { 194, 192, 102, 86, 38, 58, 200, 97, 15, 227, 158, 146, 105, 210, 142, 159, 197, 74, 251, 220, 93, 184, 189, 9, 213, 6, 162, 113, 88, 41, 229, 173, 183, 94, 63, 168, 22, 123, 177, 163, 195, 81, 80, 250, 53, 150, 56, 101, 76, 156, 35, 65, 239, 219, 124, 242, 133, 98, 70, 190, 0, 134, 175, 236, 241, 185, 186, 244, 87, 57, 117, 127, 180, 228, 246, 216, 165, 223, 60, 36, 95, 233, 125, 75, 252, 255, 182, 148, 91, 39, 13, 92, 161, 109, 170, 33, 137, 37, 208, 129 }, { 50, 179, 188, 200, 124, 199, 231, 223, 12, 158, 1, 194, 135, 96, 67, 198, 27, 70, 90, 9, 112, 146, 209, 241, 159, 56, 54, 165, 98, 214, 148, 103, 207, 111, 134, 100, 252, 141, 18, 16, 34, 93, 104, 166, 212, 79, 66, 129, 84, 175, 40, 201, 23, 0, 229, 59, 147, 20, 184, 215, 143, 232, 72, 130, 245, 244, 178, 249, 123, 5, 28, 47, 180, 39, 248, 109, 197, 239, 250, 52, 60, 11, 189, 196, 36, 225, 89, 133, 222, 69, 125, 43, 80, 162, 203, 193, 169, 32, 220, 71 }, { 148, 110, 20, 113, 132, 193, 253, 212, 35, 125, 5, 28, 160, 105, 23, 31, 136, 235, 205, 100, 101, 15, 155, 146, 71, 237, 195, 154, 254, 45, 33, 8, 115, 94, 206, 224, 37, 139, 43, 65, 106, 72, 186, 129, 240, 211, 178, 172, 142, 32, 135, 143, 150, 57, 243, 48, 21, 167, 163, 66, 119, 61, 234, 182, 22, 228, 238, 74, 7, 73, 169, 60, 131, 77, 59, 50, 2, 83, 196, 181, 52, 10, 104, 90, 192, 44, 244, 26, 221, 126, 213, 229, 114, 174, 161, 171, 103, 145, 187, 112 }, { 14, 49, 176, 79, 92, 63, 168, 45, 123, 252, 37, 82, 87, 208, 218, 238, 101, 185, 27, 85, 2, 135, 95, 233, 69, 84, 88, 166, 13, 57, 196, 165, 241, 100, 245, 39, 7, 236, 174, 127, 58, 213, 144, 219, 239, 177, 70, 224, 164, 42, 72, 31, 134, 43, 225, 187, 180, 244, 51, 133, 231, 145, 78, 206, 131, 136, 157, 115, 222, 106, 59, 179, 34, 15, 154, 178, 117, 151, 207, 33, 122, 83, 29, 67, 193, 126, 128, 76, 158, 223, 47, 5, 44, 129, 199, 250, 163, 242, 25, 161 }, { 229, 70, 175, 88, 185, 123, 251, 44, 221, 45, 13, 239, 213, 115, 159, 219, 72, 50, 
135, 181, 173, 171, 37, 228, 14, 19, 146, 210, 232, 216, 12, 147, 114, 113, 220, 253, 211, 46, 188, 134, 39, 98, 183, 249, 23, 194, 79, 236, 33, 84, 158, 245, 151, 2, 89, 242, 58, 118, 198, 157, 92, 91, 168, 67, 129, 222, 100, 99, 102, 40, 180, 248, 7, 34, 196, 95, 130, 212, 246, 192, 69, 237, 43, 57, 97, 20, 205, 104, 103, 201, 65, 1, 117, 5, 112, 52, 225, 55, 127, 109 }, { 92, 54, 124, 25, 99, 128, 228, 90, 237, 153, 213, 216, 43, 210, 56, 15, 60, 102, 100, 108, 97, 83, 36, 251, 70, 115, 93, 158, 212, 243, 143, 130, 168, 184, 29, 173, 126, 125, 34, 50, 110, 2, 211, 204, 225, 28, 32, 176, 194, 38, 4, 61, 48, 166, 208, 18, 192, 5, 220, 186, 231, 209, 218, 226, 104, 105, 191, 240, 116, 178, 30, 229, 246, 101, 195, 207, 112, 19, 164, 134, 44, 224, 201, 232, 219, 23, 157, 16, 154, 80, 22, 179, 235, 247, 146, 196, 248, 203, 252, 200 }, { 41, 98, 8, 27, 111, 89, 223, 52, 54, 230, 225, 236, 28, 73, 211, 71, 33, 61, 140, 133, 208, 83, 153, 20, 65, 254, 175, 189, 24, 62, 92, 13, 224, 161, 55, 85, 199, 107, 206, 218, 109, 117, 194, 25, 70, 241, 80, 122, 68, 101, 64, 156, 29, 44, 145, 74, 205, 131, 86, 82, 37, 26, 169, 5, 90, 134, 255, 138, 23, 238, 137, 197, 193, 167, 42, 210, 180, 235, 115, 239, 179, 16, 87, 231, 76, 39, 216, 181, 253, 163, 57, 178, 88, 45, 6, 240, 196, 48, 130, 18 }, { 169, 0, 220, 211, 219, 20, 97, 136, 68, 33, 230, 177, 125, 252, 51, 188, 192, 195, 203, 114, 209, 147, 199, 197, 170, 198, 110, 143, 88, 31, 71, 193, 160, 205, 174, 7, 118, 67, 168, 105, 99, 175, 244, 144, 5, 243, 121, 103, 1, 182, 229, 155, 113, 111, 140, 37, 152, 77, 202, 255, 129, 207, 132, 167, 108, 127, 54, 115, 74, 173, 126, 234, 224, 50, 63, 95, 12, 150, 48, 91, 163, 59, 70, 47, 241, 216, 235, 17, 248, 21, 109, 42, 176, 117, 76, 185, 166, 62, 120, 190 }, { 203, 73, 6, 231, 221, 107, 29, 254, 149, 2, 58, 236, 250, 57, 196, 25, 96, 103, 190, 14, 99, 23, 169, 17, 32, 162, 225, 180, 106, 144, 34, 87, 226, 188, 218, 110, 69, 204, 214, 127, 229, 145, 52, 64, 11, 161, 1, 126, 209, 245, 246, 232, 164, 117, 207, 150, 71, 65, 48, 142, 224, 174, 50, 153, 157, 85, 251, 136, 212, 67, 146, 210, 143, 205, 109, 77, 89, 230, 118, 5, 217, 189, 66, 102, 116, 252, 235, 113, 201, 108, 173, 234, 8, 137, 94, 15, 193, 42, 192, 197 }, { 3, 153, 52, 185, 238, 14, 25, 77, 79, 112, 243, 178, 121, 236, 195, 30, 210, 71, 166, 212, 72, 54, 63, 186, 140, 81, 48, 250, 230, 194, 144, 61, 244, 241, 50, 214, 40, 68, 221, 183, 17, 182, 143, 118, 208, 119, 67, 107, 249, 173, 18, 217, 126, 92, 189, 34, 239, 117, 96, 160, 64, 20, 59, 161, 191, 133, 205, 43, 51, 165, 220, 216, 37, 207, 227, 222, 110, 13, 226, 152, 99, 170, 255, 247, 141, 176, 174, 111, 192, 109, 148, 58, 49, 224, 199, 46, 193, 122, 130, 89 }, { 112, 221, 252, 13, 37, 90, 206, 144, 158, 43, 167, 81, 216, 114, 184, 182, 136, 34, 232, 163, 124, 35, 234, 207, 25, 44, 170, 73, 77, 67, 139, 1, 88, 111, 71, 21, 186, 38, 110, 197, 138, 126, 9, 212, 98, 244, 85, 27, 79, 82, 22, 28, 115, 121, 205, 119, 236, 198, 233, 113, 150, 162, 211, 87, 96, 173, 83, 70, 117, 109, 237, 188, 53, 152, 242, 154, 61, 149, 223, 190, 106, 48, 200, 254, 17, 225, 239, 175, 97, 24, 142, 147, 108, 10, 7, 92, 12, 194, 208, 222 }, { 153, 57, 242, 173, 190, 232, 103, 198, 202, 211, 107, 226, 255, 124, 38, 130, 95, 141, 194, 196, 19, 60, 99, 216, 15, 67, 204, 229, 118, 243, 116, 2, 148, 28, 114, 55, 72, 217, 215, 58, 213, 40, 161, 101, 96, 71, 249, 154, 45, 3, 105, 46, 37, 181, 11, 35, 240, 224, 82, 80, 131, 108, 178, 220, 238, 111, 225, 168, 156, 86, 97, 49, 163, 27, 182, 31, 246, 17, 218, 85, 234, 104, 59, 241, 93, 
87, 142, 247, 10, 175, 61, 149, 195, 75, 21, 166, 44, 252, 200, 231 }, { 17, 73, 88, 170, 247, 21, 79, 226, 23, 40, 161, 13, 34, 62, 108, 146, 134, 46, 242, 51, 138, 214, 149, 53, 118, 95, 130, 103, 157, 102, 86, 168, 101, 190, 90, 158, 169, 107, 206, 70, 186, 241, 148, 129, 238, 1, 63, 84, 67, 58, 74, 167, 209, 76, 233, 229, 160, 255, 96, 139, 188, 194, 249, 197, 83, 4, 115, 87, 105, 201, 61, 85, 6, 133, 145, 219, 171, 8, 3, 56, 94, 10, 64, 191, 183, 178, 159, 195, 155, 228, 55, 251, 99, 192, 49, 97, 43, 117, 92, 135 }, { 123, 82, 128, 130, 86, 248, 92, 254, 20, 52, 171, 112, 143, 36, 114, 239, 159, 180, 198, 191, 33, 175, 187, 124, 168, 170, 23, 7, 70, 182, 253, 166, 53, 93, 210, 218, 201, 29, 173, 13, 228, 217, 121, 75, 153, 91, 225, 122, 145, 241, 108, 104, 11, 31, 68, 224, 81, 37, 189, 199, 165, 96, 179, 80, 151, 216, 56, 148, 72, 73, 211, 66, 45, 27, 238, 54, 188, 109, 255, 149, 247, 106, 152, 125, 44, 5, 76, 26, 40, 208, 185, 164, 14, 150, 117, 215, 62, 230, 236, 226 }, { 153, 242, 154, 54, 111, 28, 80, 61, 68, 175, 87, 45, 22, 133, 31, 76, 207, 163, 14, 159, 174, 50, 30, 15, 234, 100, 132, 255, 20, 1, 86, 246, 92, 123, 192, 49, 43, 71, 51, 219, 6, 59, 226, 185, 53, 74, 44, 125, 145, 81, 13, 206, 64, 130, 110, 24, 240, 201, 115, 158, 118, 161, 228, 157, 19, 162, 16, 211, 148, 191, 66, 194, 101, 26, 199, 129, 227, 41, 127, 104, 108, 231, 120, 137, 179, 241, 106, 37, 152, 32, 97, 79, 200, 243, 122, 178, 39, 254, 63, 165 }, { 95, 161, 134, 14, 192, 188, 34, 1, 97, 144, 51, 133, 154, 111, 204, 180, 47, 153, 3, 82, 57, 152, 61, 121, 38, 19, 106, 65, 93, 230, 234, 109, 70, 243, 226, 251, 13, 222, 148, 90, 237, 135, 232, 31, 157, 221, 151, 199, 227, 0, 246, 21, 168, 32, 162, 213, 4, 55, 255, 113, 43, 115, 44, 136, 203, 86, 175, 53, 202, 50, 20, 33, 68, 129, 78, 145, 103, 89, 215, 37, 79, 77, 8, 229, 206, 100, 138, 186, 72, 64, 7, 60, 22, 176, 214, 208, 141, 142, 195, 83 }, { 8, 102, 85, 18, 33, 190, 199, 146, 65, 213, 174, 87, 128, 222, 54, 47, 236, 71, 144, 138, 172, 160, 147, 92, 186, 130, 16, 24, 0, 10, 112, 229, 232, 208, 237, 41, 225, 70, 101, 129, 133, 86, 100, 200, 77, 149, 82, 224, 63, 61, 36, 114, 170, 14, 145, 226, 132, 123, 179, 53, 134, 90, 78, 91, 207, 148, 95, 80, 189, 245, 178, 227, 197, 150, 233, 105, 159, 205, 30, 252, 250, 231, 141, 247, 81, 218, 13, 182, 57, 55, 28, 239, 67, 60, 117, 88, 125, 46, 12, 94 }, { 43, 109, 106, 18, 153, 17, 162, 203, 101, 113, 29, 100, 0, 185, 200, 32, 175, 10, 219, 57, 224, 194, 171, 160, 158, 116, 36, 110, 207, 251, 137, 227, 135, 126, 54, 236, 205, 81, 42, 141, 3, 232, 69, 104, 241, 117, 73, 234, 84, 140, 77, 134, 226, 60, 21, 114, 195, 93, 229, 184, 242, 11, 173, 14, 41, 142, 209, 136, 249, 85, 91, 248, 97, 181, 239, 26, 197, 102, 215, 99, 165, 228, 44, 53, 96, 4, 243, 198, 211, 179, 58, 94, 217, 2, 178, 9, 121, 83, 128, 112 }, { 61, 50, 124, 131, 85, 248, 196, 128, 212, 24, 37, 87, 172, 169, 14, 223, 81, 19, 228, 252, 127, 183, 250, 249, 78, 208, 118, 7, 98, 242, 188, 186, 147, 171, 89, 13, 145, 123, 32, 65, 213, 246, 68, 9, 139, 34, 106, 41, 206, 187, 160, 133, 129, 167, 191, 244, 113, 173, 179, 243, 18, 203, 64, 159, 104, 55, 151, 165, 198, 253, 156, 241, 42, 218, 149, 114, 247, 234, 108, 126, 235, 119, 72, 70, 6, 29, 21, 107, 75, 138, 112, 15, 4, 161, 66, 238, 26, 189, 40, 100 }, { 11, 174, 12, 113, 222, 194, 55, 133, 232, 56, 69, 169, 131, 205, 98, 66, 163, 177, 72, 187, 85, 139, 218, 122, 41, 112, 198, 237, 159, 150, 104, 184, 16, 231, 213, 25, 141, 78, 255, 145, 110, 97, 70, 195, 47, 68, 221, 157, 45, 137, 26, 102, 99, 223, 203, 
126, 183, 250, 118, 59, 164, 33, 229, 101, 149, 182, 192, 22, 1, 170, 158, 58, 180, 116, 48, 123, 155, 64, 18, 188, 74, 86, 211, 24, 77, 27, 61, 143, 7, 79, 201, 165, 54, 117, 209, 247, 36, 144, 241, 88 }, { 178, 205, 73, 37, 29, 129, 217, 14, 114, 202, 180, 238, 236, 127, 35, 54, 152, 30, 153, 67, 196, 206, 187, 168, 80, 60, 27, 201, 193, 156, 15, 204, 39, 105, 253, 99, 95, 210, 240, 94, 10, 163, 87, 28, 70, 197, 85, 182, 191, 66, 220, 46, 52, 96, 147, 243, 242, 254, 124, 213, 207, 13, 44, 134, 65, 113, 31, 48, 20, 88, 64, 230, 223, 71, 172, 100, 171, 121, 78, 138, 77, 101, 8, 75, 216, 24, 158, 3, 218, 43, 26, 61, 97, 151, 120, 1, 175, 56, 161, 115 }, { 74, 100, 24, 102, 156, 222, 111, 215, 175, 109, 32, 57, 43, 124, 169, 19, 85, 233, 207, 164, 136, 238, 158, 17, 29, 197, 179, 146, 129, 89, 83, 40, 54, 26, 142, 213, 193, 88, 11, 190, 21, 133, 227, 51, 231, 181, 168, 113, 202, 211, 58, 250, 167, 148, 35, 7, 94, 160, 161, 198, 234, 122, 154, 188, 23, 69, 209, 241, 67, 247, 123, 184, 68, 118, 62, 170, 144, 59, 192, 201, 0, 93, 244, 191, 232, 252, 6, 155, 174, 9, 36, 176, 50, 41, 55, 243, 214, 14, 86, 126 }, { 49, 17, 179, 13, 186, 211, 99, 81, 60, 11, 71, 84, 10, 128, 140, 103, 66, 134, 3, 21, 220, 165, 52, 144, 107, 250, 201, 106, 231, 145, 92, 177, 167, 252, 34, 149, 36, 195, 42, 146, 5, 247, 67, 120, 46, 26, 44, 206, 243, 69, 38, 160, 157, 37, 89, 221, 68, 182, 19, 118, 223, 94, 9, 70, 232, 189, 110, 97, 104, 190, 100, 119, 79, 124, 40, 246, 175, 56, 137, 126, 171, 63, 20, 196, 228, 241, 224, 0, 244, 164, 233, 240, 193, 138, 131, 237, 183, 90, 239, 33 }, { 240, 126, 161, 231, 101, 160, 164, 214, 202, 97, 201, 157, 129, 132, 250, 107, 88, 232, 146, 209, 246, 251, 200, 110, 154, 103, 112, 62, 44, 65, 230, 78, 138, 36, 33, 120, 167, 222, 37, 227, 81, 90, 252, 139, 77, 218, 184, 192, 86, 134, 70, 173, 204, 168, 221, 80, 27, 241, 89, 166, 67, 233, 150, 7, 32, 40, 105, 56, 225, 71, 21, 8, 6, 223, 187, 151, 127, 87, 24, 189, 206, 85, 185, 14, 174, 61, 190, 193, 66, 100, 38, 22, 106, 31, 235, 79, 25, 69, 254, 163 }, { 229, 210, 63, 11, 93, 173, 56, 231, 18, 131, 150, 171, 12, 65, 202, 249, 166, 242, 92, 79, 8, 255, 159, 26, 91, 219, 237, 61, 223, 82, 14, 233, 201, 47, 117, 146, 115, 136, 142, 248, 252, 51, 37, 86, 21, 3, 49, 211, 88, 185, 181, 119, 4, 78, 153, 163, 193, 198, 90, 221, 213, 118, 226, 169, 105, 180, 215, 184, 189, 207, 225, 110, 57, 32, 155, 204, 60, 69, 23, 206, 209, 46, 42, 66, 45, 182, 137, 247, 75, 74, 232, 52, 135, 133, 126, 48, 138, 40, 162, 24 }, { 201, 112, 118, 7, 14, 23, 91, 1, 102, 145, 56, 163, 179, 101, 25, 83, 209, 183, 99, 48, 181, 143, 234, 141, 191, 41, 244, 242, 211, 188, 151, 115, 167, 13, 108, 89, 128, 158, 130, 39, 2, 222, 11, 133, 113, 196, 47, 215, 100, 218, 6, 156, 10, 229, 109, 236, 225, 20, 170, 4, 199, 55, 53, 152, 254, 247, 40, 116, 230, 21, 189, 135, 68, 76, 231, 202, 243, 200, 184, 185, 59, 114, 221, 249, 172, 73, 103, 70, 35, 233, 81, 255, 126, 136, 171, 154, 162, 223, 36, 220 }, { 183, 63, 80, 248, 197, 19, 92, 160, 10, 15, 237, 82, 121, 173, 57, 112, 12, 225, 244, 181, 113, 17, 42, 169, 204, 198, 51, 209, 127, 106, 133, 25, 228, 211, 79, 101, 36, 233, 175, 86, 68, 128, 154, 241, 192, 23, 215, 217, 240, 70, 163, 59, 245, 196, 230, 159, 212, 77, 38, 232, 222, 72, 220, 206, 107, 4, 78, 122, 251, 33, 131, 213, 73, 179, 246, 123, 49, 46, 84, 239, 205, 108, 39, 89, 6, 37, 250, 201, 31, 120, 231, 170, 202, 5, 187, 117, 0, 234, 40, 236 }, { 251, 58, 46, 166, 235, 78, 68, 211, 123, 219, 100, 25, 202, 247, 70, 135, 244, 80, 1, 56, 112, 233, 159, 30, 250, 0, 
243, 84, 6, 29, 177, 128, 169, 53, 102, 201, 81, 60, 47, 238, 154, 93, 213, 42, 57, 74, 110, 150, 72, 127, 214, 18, 186, 126, 228, 167, 103, 172, 104, 185, 226, 17, 20, 158, 241, 39, 26, 132, 22, 33, 83, 217, 51, 161, 97, 200, 253, 249, 16, 59, 76, 120, 43, 64, 119, 31, 73, 140, 178, 108, 225, 2, 98, 156, 130, 106, 163, 152, 195, 63 }, { 249, 179, 23, 144, 213, 28, 75, 96, 199, 115, 237, 125, 222, 124, 47, 247, 151, 205, 3, 181, 57, 111, 29, 13, 202, 54, 58, 15, 189, 128, 70, 161, 183, 168, 79, 221, 80, 10, 193, 63, 240, 162, 16, 83, 157, 219, 250, 173, 12, 244, 56, 116, 198, 102, 39, 30, 196, 223, 17, 89, 236, 42, 214, 188, 172, 32, 245, 169, 105, 5, 49, 50, 206, 81, 9, 37, 166, 220, 2, 180, 94, 137, 216, 176, 141, 227, 229, 22, 153, 100, 203, 31, 36, 149, 84, 52, 194, 19, 159, 104 } }; // Mask/Kernel int h[H_SIZE][H_SIZE] = { {0, 1, 0}, {1, -4, 1}, {0, 1, 0} }; // Convolution in each Input point int A[I_SIZE][I_SIZE]; int y, x; // Loop Counters // Number of threads int num_of_threads = -1; // Get number of threads - to create - from args if(argc == 2) num_of_threads = atoi(argv[1]); // Set number of threads or error if(num_of_threads > 0) { // Create threads omp_set_num_threads(num_of_threads); } else { printf("\nError: ['Create threads' Failed! Cause: Number of threads <= 0]\n\n"); exit(1); } // Print message printf("\n\nHello from %d threads!\n\n", num_of_threads); #pragma omp parallel for shared(A, I, h) private(y,x) for(y=0; y<I_SIZE; y++) { for(x=0; x<I_SIZE; x++) { // printf("(y,x):(%d,%d) | Thread: %d \n", y, x, omp_get_thread_num() ); // Print y, x and the thread A[y][x] = convolution(I, h, y, x); // Get convolution value in point y, x // printf("A[%d][%d]: %d \n\n", y, x, A[y][x] ); // Print convolution value in each point (y,x) } } y = 0; x = 0; printf("\nConvolution | A[%d][%d]=%d \n", y, x, A[y][x]); y = 0; x = 99; printf("\nConvolution | A[%d][%d]=%d \n", y, x, A[y][x]); y = 61; x = 83; printf("\nConvolution | A[%d][%d]=%d \n", y, x, A[y][x]); y = 68; x = 12; printf("\nConvolution | A[%d][%d]=%d \n", y, x, A[y][x]); y = 83; x = 96; printf("\nConvolution | A[%d][%d]=%d \n", y, x, A[y][x]); printf("\n\n"); if(SAVETOFILE) save_to_file(A); // system("PAUSE"); return 0; } // End main /* C O N V O L U T I O N */ int convolution(int I[I_SIZE][I_SIZE], int h[H_SIZE][H_SIZE], int y, int x) { int k, j; // Loop Counters int i1, j1, i2, j2; // Helpers (point) int res1, res2; // Helpers int result = 0; // Convolution value in point y, x for(k=-1; k<2; k++) { for(j=-1; j<2; j++) { i1 = j+1; j1 = k+1; i2 = y-j; j2 = x-k; if( ( (i1 >= 0) && (i1 < H_SIZE) ) && ( (j1 >= 0) && (j1 < H_SIZE) ) ) { res1 = h[i1][j1]; } else { res1 = 0; } if( ( (i2 >= 0) && (i2 < I_SIZE) ) && ( (j2 >= 0) && (j2 < I_SIZE) ) ) { res2 = I[i2][j2]; } else { res2 = 0; } // A += h[j+1][k+1]*I[y-j][x-k]; result += res1*res2; } } if(result < 0) { result = 0; } if(result > 255) { result = 255; } return result; // Demo code // return abs(result); } /* S A V E L I S T T O F I L E */ void save_to_file(int A[I_SIZE][I_SIZE]) { FILE *file_conv_res; int y, x; file_conv_res = fopen("convolution_z3.txt", "w"); if(file_conv_res == NULL){ printf("Error: ['Save to file' Failed!]"); exit(1); } for(y=0; y<I_SIZE; y++) { for(x=0; x<I_SIZE; x++) { fprintf(file_conv_res, "A[%d][%d] = %d \n", y, x, A[y][x]); } } fclose(file_conv_res); return; } //End of saving to file.
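A quick hand check of the first printed point, using the data and the indexing convention in convolution() above (h[j+1][k+1] paired with I[y-j][x-k]; out-of-range taps contribute 0; this is hand arithmetic, not program output): at y=0, x=0 the in-range terms of the Laplacian mask are h[1][1]*I[0][0] = -4*139 = -556, h[1][0]*I[0][1] = 1*13 = 13, and h[0][1]*I[1][0] = 1*54 = 54. The raw sum is -489, and the final clamp to [0, 255] yields A[0][0] = 0.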
task-taskgroup-unrelated.c
/*
 * task-taskgroup-unrelated.c -- Archer testcase
 */
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
//
// See tools/archer/LICENSE.txt for details.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

// RUN: %libarcher-compile-and-run-race | FileCheck %s
#include <omp.h>
#include <stdio.h>
#include <unistd.h>
#include "ompt/ompt-signal.h"

int main(int argc, char *argv[]) {
  int var = 0, a = 0;

#pragma omp parallel num_threads(2) shared(var, a)
#pragma omp master
  {
#pragma omp task shared(var, a)
    {
      var++;
      OMPT_SIGNAL(a);
      // Give master thread time to execute the task in the taskgroup.
      OMPT_WAIT(a, 2);
    }

#pragma omp taskgroup
    {
#pragma omp task if (0)
      {
        // Dummy task.
      }

      // Give other threads time to steal the tasks.
      OMPT_WAIT(a, 1);
      OMPT_SIGNAL(a);
    }

    var++;
  }

  int error = (var != 2);
  fprintf(stderr, "DONE\n");
  return error;
}

// CHECK: WARNING: ThreadSanitizer: data race
// CHECK-NEXT: {{(Write|Read)}} of size 4
// CHECK-NEXT: #0 {{.*}}task-taskgroup-unrelated.c:46
// CHECK: Previous write of size 4
// CHECK-NEXT: #0 {{.*}}task-taskgroup-unrelated.c:28
// CHECK: DONE
// CHECK: ThreadSanitizer: reported 1 warnings
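// Editor's note (not part of the upstream test): the reported race is the
// point of the test. "#pragma omp taskgroup" only waits for tasks created
// *inside* it, so the first task's var++ is unordered with the master
// thread's var++ after the taskgroup. A race-free variant would need the
// first task created inside the taskgroup, or a "#pragma omp taskwait"
// before the final increment -- exactly what this test intentionally omits.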
hello.c
#include <stdio.h>
#include <omp.h>

int main(int argc, char *argv[])
{
    int tid, nthreads;

    printf("Hello world!\n");

#pragma omp parallel private(tid) shared(nthreads)
    {
        tid = omp_get_thread_num();

#pragma omp single
        nthreads = omp_get_num_threads();

#pragma omp critical
        printf(" ... from thread ID %i.\n", tid);
    }

    printf("There were %i threads in total.\n", nthreads);

    return 0;
}
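/* Editor's note (not part of the original): only the thread executing the
 * "single" construct writes nthreads, and it is read only after the region's
 * join barrier, so the final printf is race-free. A sketch of an alternative
 * that avoids the shared variable entirely (assumed equivalent, since every
 * thread in the team sees the same team size):
 *
 *   int n = 0;
 *   #pragma omp parallel reduction(max : n)
 *   n = omp_get_num_threads();
 *   printf("There were %i threads in total.\n", n);
 */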
simd_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp simd'}}
#pragma omp simd

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp simd'}}
#pragma omp simd foo

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp simd'}}
#pragma omp simd safelen(4)

void test_no_clause() {
  int i;
#pragma omp simd
  for (i = 0; i < 16; ++i) ;

  // expected-error@+2 {{statement after '#pragma omp simd' must be a for loop}}
#pragma omp simd
  ++i;
}

void test_branch_protected_scope() {
  int i = 0;
L1:
  ++i;

  int x[24];

#pragma omp simd
  for (i = 0; i < 16; ++i) {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }

  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}

void test_invalid_clause() {
  int i;
  // expected-warning@+1 {{extra tokens at the end of '#pragma omp simd' are ignored}}
#pragma omp simd foo bar
  for (i = 0; i < 16; ++i) ;
}

void test_non_identifiers() {
  int i, x;

  // expected-warning@+1 {{extra tokens at the end of '#pragma omp simd' are ignored}}
#pragma omp simd;
  for (i = 0; i < 16; ++i) ;
  // expected-error@+2 {{unexpected OpenMP clause 'firstprivate' in directive '#pragma omp simd'}}
  // expected-warning@+1 {{extra tokens at the end of '#pragma omp simd' are ignored}}
#pragma omp simd firstprivate(x);
  for (i = 0; i < 16; ++i) ;
  // expected-warning@+1 {{extra tokens at the end of '#pragma omp simd' are ignored}}
#pragma omp simd private(x);
  for (i = 0; i < 16; ++i) ;
  // expected-warning@+1 {{extra tokens at the end of '#pragma omp simd' are ignored}}
#pragma omp simd, private(x);
  for (i = 0; i < 16; ++i) ;
}

extern int foo();

void test_safelen() {
  int i;
  // expected-error@+1 {{expected '('}}
#pragma omp simd safelen
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd safelen(
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{expected expression}}
#pragma omp simd safelen()
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd safelen(,
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd safelen(, )
  for (i = 0; i < 16; ++i) ;
  // expected-warning@+2 {{extra tokens at the end of '#pragma omp simd' are ignored}}
  // expected-error@+1 {{expected '('}}
#pragma omp simd safelen 4)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp simd safelen(4
  for (i = 0; i < 16; ++i) ;
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp simd safelen(4,
  for (i = 0; i < 16; ++i) ;
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp simd safelen(4, )
  for (i = 0; i < 16; ++i) ;
  // xxpected-error@+1 {{expected expression}}
#pragma omp simd safelen(4)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp simd safelen(4 4)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp simd safelen(4, , 4)
  for (i = 0; i < 16; ++i) ;
#pragma omp simd safelen(4)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}}
#pragma omp simd safelen(4, 8)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp simd safelen(2.5)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp simd safelen(foo())
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{argument to 'safelen' clause must be a positive integer value}}
#pragma omp simd safelen(-5)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{argument to 'safelen' clause must be a positive integer value}}
#pragma omp simd safelen(0)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{argument to 'safelen' clause must be a positive integer value}}
#pragma omp simd safelen(5 - 5)
  for (i = 0; i < 16; ++i) ;
}

void test_collapse() {
  int i;
  // expected-error@+1 {{expected '('}}
#pragma omp simd collapse
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd collapse(
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{expected expression}}
#pragma omp simd collapse()
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd collapse(,
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd collapse(, )
  for (i = 0; i < 16; ++i) ;
  // expected-warning@+2 {{extra tokens at the end of '#pragma omp simd' are ignored}}
  // expected-error@+1 {{expected '('}}
#pragma omp simd collapse 4)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp simd collapse(4
  for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}}
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp simd collapse(4,
  for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}}
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp simd collapse(4, )
  for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}}
  // xxpected-error@+1 {{expected expression}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp simd collapse(4)
  for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}}
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp simd collapse(4 4)
  for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}}
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp simd collapse(4, , 4)
  for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}}
#pragma omp simd collapse(4)
  for (int i1 = 0; i1 < 16; ++i1)
    for (int i2 = 0; i2 < 16; ++i2)
      for (int i3 = 0; i3 < 16; ++i3)
        for (int i4 = 0; i4 < 16; ++i4)
          foo();
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp simd collapse(4, 8)
  for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}}
  // expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp simd collapse(2.5)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp simd collapse(foo())
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{argument to 'collapse' clause must be a positive integer value}}
#pragma omp simd collapse(-5)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{argument to 'collapse' clause must be a positive integer value}}
#pragma omp simd collapse(0)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{argument to 'collapse' clause must be a positive integer value}}
#pragma omp simd collapse(5 - 5)
  for (i = 0; i < 16; ++i) ;
  // expected-note@+2 {{defined as reduction}}
#pragma omp parallel
#pragma omp simd collapse(2) reduction(+ : i)
  for (i = 0; i < 16; ++i)
    // expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for' directive into a parallel or another task region?}}
    for (int j = 0; j < 16; ++j)
      // expected-error@+3 {{reduction variable must be shared}}
      // expected-error@+2 {{private variable cannot be reduction}}
      // expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}}
#pragma omp for reduction(+ : i, j)
      for (int k = 0; k < 16; ++k)
        i += j;
}

void test_linear() {
  int i;
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd linear(
  for (i = 0; i < 16; ++i) ;
  // expected-error@+2 {{expected expression}}
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd linear(,
  for (i = 0; i < 16; ++i) ;
  // expected-error@+2 {{expected expression}}
  // expected-error@+1 {{expected expression}}
#pragma omp simd linear(, )
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{expected expression}}
#pragma omp simd linear()
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{expected expression}}
#pragma omp simd linear(int)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{expected variable name}}
#pragma omp simd linear(0)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp simd linear(x)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+2 {{use of undeclared identifier 'x'}}
  // expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp simd linear(x, y)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+3 {{use of undeclared identifier 'x'}}
  // expected-error@+2 {{use of undeclared identifier 'y'}}
  // expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp simd linear(x, y, z)
  for (i = 0; i < 16; ++i) ;

  int x, y;
  // expected-error@+1 {{expected expression}}
#pragma omp simd linear(x :)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd linear(x :, )
  for (i = 0; i < 16; ++i) ;
#pragma omp simd linear(x : 1)
  for (i = 0; i < 16; ++i) ;
#pragma omp simd linear(x : 2 * 2)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd linear(x : 1, y)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd linear(x : 1, y, z : 1)
  for (i = 0; i < 16; ++i) ;
  // expected-note@+2 {{defined as linear}}
  // expected-error@+1 {{linear variable cannot be linear}}
#pragma omp simd linear(x) linear(x)
  for (i = 0; i < 16; ++i) ;
  // expected-note@+2 {{defined as private}}
  // expected-error@+1 {{private variable cannot be linear}}
#pragma omp simd private(x) linear(x)
  for (i = 0; i < 16; ++i) ;
  // expected-note@+2 {{defined as linear}}
  // expected-error@+1 {{linear variable cannot be private}}
#pragma omp simd linear(x) private(x)
  for (i = 0; i < 16; ++i) ;
  // expected-warning@+1 {{zero linear step (x and other variables in clause should probably be const)}}
#pragma omp simd linear(x, y : 0)
  for (i = 0; i < 16; ++i) ;
  // expected-note@+2 {{defined as linear}}
  // expected-error@+1 {{linear variable cannot be lastprivate}}
#pragma omp simd linear(x) lastprivate(x)
  for (i = 0; i < 16; ++i) ;
  // expected-note@+2 {{defined as lastprivate}}
  // expected-error@+1 {{lastprivate variable cannot be linear}}
#pragma omp simd lastprivate(x) linear(x)
  for (i = 0; i < 16; ++i) ;
}

void test_aligned() {
  int i;
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd aligned(
  for (i = 0; i < 16; ++i) ;
  // expected-error@+2 {{expected expression}}
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd aligned(,
  for (i = 0; i < 16; ++i) ;
  // expected-error@+2 {{expected expression}}
  // expected-error@+1 {{expected expression}}
#pragma omp simd aligned(, )
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{expected expression}}
#pragma omp simd aligned()
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{expected expression}}
#pragma omp simd aligned(int)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{expected variable name}}
#pragma omp simd aligned(0)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp simd aligned(x)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+2 {{use of undeclared identifier 'x'}}
  // expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp simd aligned(x, y)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+3 {{use of undeclared identifier 'x'}}
  // expected-error@+2 {{use of undeclared identifier 'y'}}
  // expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp simd aligned(x, y, z)
  for (i = 0; i < 16; ++i) ;

  int *x, y, z[25]; // expected-note 4 {{'y' defined here}}
#pragma omp simd aligned(x)
  for (i = 0; i < 16; ++i) ;
#pragma omp simd aligned(z)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{expected expression}}
#pragma omp simd aligned(x :)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd aligned(x :, )
  for (i = 0; i < 16; ++i) ;
#pragma omp simd aligned(x : 1)
  for (i = 0; i < 16; ++i) ;
#pragma omp simd aligned(x : 2 * 2)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd aligned(x : 1, y)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd aligned(x : 1, y, z : 1)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp simd aligned(x, y)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp simd aligned(x, y, z)
  for (i = 0; i < 16; ++i) ;
  // expected-note@+2 {{defined as aligned}}
  // expected-error@+1 {{a variable cannot appear in more than one aligned clause}}
#pragma omp simd aligned(x) aligned(z, x)
  for (i = 0; i < 16; ++i) ;
  // expected-note@+3 {{defined as aligned}}
  // expected-error@+2 {{a variable cannot appear in more than one aligned clause}}
  // expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp simd aligned(x, y, z) aligned(y, z)
  for (i = 0; i < 16; ++i) ;
}

void test_private() {
  int i;
  // expected-error@+2 {{expected expression}}
  // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd private(
  for (i = 0; i < 16; ++i) ;
  // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
  // expected-error@+1 2 {{expected expression}}
#pragma omp simd private(,
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 2 {{expected expression}}
#pragma omp simd private(, )
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{expected expression}}
#pragma omp simd private()
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{expected expression}}
#pragma omp simd private(int)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{expected variable name}}
#pragma omp simd private(0)
  for (i = 0; i < 16; ++i) ;

  int x, y, z;
#pragma omp simd private(x)
  for (i = 0; i < 16; ++i) ;
#pragma omp simd private(x, y)
  for (i = 0; i < 16; ++i) ;
#pragma omp simd private(x, y, z)
  for (i = 0; i < 16; ++i) {
    x = y * i + z;
  }
}

void test_firstprivate() {
  int i;
  // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}}
  // expected-error@+2 {{unexpected OpenMP clause 'firstprivate' in directive '#pragma omp simd'}}
  // expected-error@+1 {{expected expression}}
#pragma omp simd firstprivate(
  for (i = 0; i < 16; ++i) ;
}

void test_lastprivate() {
  int i;
  // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
  // expected-error@+1 {{expected expression}}
#pragma omp simd lastprivate(
  for (i = 0; i < 16; ++i) ;
  // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
  // expected-error@+1 2 {{expected expression}}
#pragma omp simd lastprivate(,
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 2 {{expected expression}}
#pragma omp simd lastprivate(, )
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{expected expression}}
#pragma omp simd lastprivate()
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{expected expression}}
#pragma omp simd lastprivate(int)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{expected variable name}}
#pragma omp simd lastprivate(0)
  for (i = 0; i < 16; ++i) ;

  int x, y, z;
#pragma omp simd lastprivate(x)
  for (i = 0; i < 16; ++i) ;
#pragma omp simd lastprivate(x, y)
  for (i = 0; i < 16; ++i) ;
#pragma omp simd lastprivate(x, y, z)
  for (i = 0; i < 16; ++i) ;
}

void test_reduction() {
  int i, x, y;
  // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}}
  // expected-error@+2 {{expected identifier}}
  // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}}
#pragma omp simd reduction(
  for (i = 0; i < 16; ++i) ;
  // expected-error@+2 {{expected identifier}}
  // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}}
#pragma omp simd reduction()
  for (i = 0; i < 16; ++i) ;
  // expected-error@+2 {{expected expression}}
  // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}}
#pragma omp simd reduction(x)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{expected identifier}}
#pragma omp simd reduction( : x)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}}
  // expected-error@+2 {{expected identifier}}
  // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}}
#pragma omp simd reduction(,
  for (i = 0; i < 16; ++i) ;
  // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}}
  // expected-error@+2 {{expected expression}}
  // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}}
#pragma omp simd reduction(+
  for (i = 0; i < 16; ++i) ;
  // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}}
  //
  // expected-error@+1 {{expected expression}}
#pragma omp simd reduction(+:
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{expected expression}}
#pragma omp simd reduction(+ :)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{expected expression}}
#pragma omp simd reduction(+ :, y)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{expected expression}}
#pragma omp simd reduction(+ : x, + : y)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{expected identifier}}
#pragma omp simd reduction(% : x)
  for (i = 0; i < 16; ++i) ;
#pragma omp simd reduction(+ : x)
  for (i = 0; i < 16; ++i) ;
#pragma omp simd reduction(* : x)
  for (i = 0; i < 16; ++i) ;
#pragma omp simd reduction(- : x)
  for (i = 0; i < 16; ++i) ;
#pragma omp simd reduction(& : x)
  for (i = 0; i < 16; ++i) ;
#pragma omp simd reduction(| : x)
  for (i = 0; i < 16; ++i) ;
#pragma omp simd reduction(^ : x)
  for (i = 0; i < 16; ++i) ;
#pragma omp simd reduction(&& : x)
  for (i = 0; i < 16; ++i) ;
#pragma omp simd reduction(|| : x)
  for (i = 0; i < 16; ++i) ;
#pragma omp simd reduction(max : x)
  for (i = 0; i < 16; ++i) ;
#pragma omp simd reduction(min : x)
  for (i = 0; i < 16; ++i) ;

  struct X {
    int x;
  };
  struct X X;
  // expected-error@+1 {{expected variable name}}
#pragma omp simd reduction(+ : X.x)
  for (i = 0; i < 16; ++i) ;
  // expected-error@+1 {{expected variable name}}
#pragma omp simd reduction(+ : x + x)
  for (i = 0; i < 16; ++i) ;
}

void test_loop_messages() {
  float a[100], b[100], c[100];
  // expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp simd
  for (float fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
  // expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp simd
  for (double fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
}
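// Editor's addendum (not part of the upstream test): for contrast with the
// malformed directives above, a minimal well-formed combination of the
// clauses this file exercises; guarded so the -verify run is unchanged.
#ifdef SIMD_WELL_FORMED_SKETCH
float simd_well_formed(float *a, float *b) {
  float sum = 0.0f;
#pragma omp simd safelen(8) aligned(a, b : 32) reduction(+ : sum)
  for (int i = 0; i < 16; ++i)
    sum += a[i] * b[i]; // a plain dot-product body with no loop-carried state
  return sum;
}
#endif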
array_sections-4.c
/* { dg-do run } */

#include <stdlib.h>

void
foo ()
{
  int A[30], *p;

  #pragma omp target data map(A[0:10])
  {
    p = &A[0];
    #pragma omp target map(p[3:7]) map(A[0:10])
    {
      A[2] = 777;
      A[8] = 777;
      p[8] = 999;
    }
  }

  if (A[2] != 777 || A[8] != 999)
    abort ();
}

int
main ()
{
  foo ();
  return 0;
}
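/* Editor's note (not part of the upstream test): the outer "target data"
   maps A[0:10], so the inner map(A[0:10]) reuses that device copy, and
   map(p[3:7]) attaches p to the same storage. p[8] and A[8] therefore alias
   on the device; the second store (p[8] = 999) wins, and the copy-back at
   the end of the data region is why the host must see A[8] == 999 while
   A[2] keeps its 777. */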
omp_hello.cpp
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

int main (int argc, char *argv[])
{
    int nthreads, tid;

#pragma omp parallel private(nthreads, tid)
    {
        // get thread number
        tid = omp_get_thread_num();
        printf("Hello world from thread = %d\n", tid);

        if (tid == 0)
        {
            nthreads = omp_get_num_threads();
            printf("number of threads = %d\n", nthreads);
        }
    }

    return 0;
}
kt_thread.c
#include "kt.h" #include "kt_thread.h" #include "kt_hmap.h" /* Initial buffer size for messages. Will resize when necessary. */ #ifndef MSG_DEFAULT_BUFFER #define MSG_DEFAULT_BUFFER 1000 #endif /****************************************************************************** * THREAD FUNCTIONS *****************************************************************************/ triangle_t * get_incoming_triangle_bucket( thread_ws * * ws, int const bucket_id, int32_t * num_triangles) { int const tid = omp_get_thread_num(); thread_ws * const my_ws = ws[tid]; int32_t ntriangles = 0; /* Count the number of triangles in bucket (across all threads) */ for(int t=0; t < my_ws->num_threads; ++t) { ntriangles += ws[t]->buckets[bucket_id].num_tri_msgs; } /* resize buffer if necessary */ if(my_ws->len_tri_buffer < ntriangles) { my_ws->len_tri_buffer = ntriangles; gk_free((void **) &(my_ws->tri_buffer), LTERM); my_ws->tri_buffer = gk_malloc(ntriangles * sizeof(*my_ws->tri_buffer), "tri_buffer"); } triangle_t * const restrict buffer = my_ws->tri_buffer; /* go over each thread's bucket */ int32_t buffer_ptr = 0; for(int t=0; t < my_ws->num_threads; ++t) { /* copy incoming bucket */ int32_t const num_msgs = ws[t]->buckets[bucket_id].num_tri_msgs; if(num_msgs > 0) { triangle_t const * const msgs = ws[t]->buckets[bucket_id].tri_msgs; for(int32_t m=0; m < num_msgs; ++m) { buffer[buffer_ptr++] = msgs[m]; } /* now empty buffer */ ws[t]->buckets[bucket_id].num_tri_msgs = 0; } } *num_triangles = ntriangles; return buffer; } pair_t * get_incoming_pair_bucket( thread_ws * * ws, int const bucket_id, int32_t * num_pairs) { int const tid = omp_get_thread_num(); thread_ws * const my_ws = ws[tid]; int32_t npairs = 0; /* Count the number of pairs in bucket (across all threads) */ for(int t=0; t < my_ws->num_threads; ++t) { npairs += ws[t]->buckets[bucket_id].num_pair_msgs; } /* resize buffer if necessary */ if(my_ws->len_pair_buffer < npairs) { my_ws->len_pair_buffer = npairs; gk_free((void **) &(my_ws->pair_buffer), LTERM); my_ws->pair_buffer = gk_malloc(npairs * sizeof(*my_ws->pair_buffer), "pair_buffer"); } pair_t * const restrict buffer = my_ws->pair_buffer; /* go over each thread's bucket */ int32_t buffer_ptr = 0; for(int t=0; t < my_ws->num_threads; ++t) { /* copy incoming bucket */ int32_t const num_msgs = ws[t]->buckets[bucket_id].num_pair_msgs; if(num_msgs > 0) { pair_t const * const msgs = ws[t]->buckets[bucket_id].pair_msgs; for(int32_t m=0; m < num_msgs; ++m) { buffer[buffer_ptr++] = msgs[m]; } /* now empty buffer */ ws[t]->buckets[bucket_id].num_pair_msgs = 0; } } *num_pairs = npairs; return buffer; } int64_t * get_incoming_edge_bucket( thread_ws * * ws, int const bucket_id, int64_t * num_edges, int const which) { assert(which >= 0); assert(which < 2); int const tid = omp_get_thread_num(); thread_ws * const my_ws = ws[tid]; int64_t nedges = 0; /* Count the number of edges in bucket (across all threads) */ for(int t=0; t < my_ws->num_threads; ++t) { if(which == 0) { nedges += ws[t]->buckets[bucket_id].num_edge_msgs; } else { nedges += ws[t]->buckets[bucket_id].num_edge_msgs2; } } /* resize buffer if necessary */ if(which == 0) { if(my_ws->len_edge_buffer < nedges) { my_ws->len_edge_buffer = nedges; gk_free((void **) &(my_ws->edge_buffer), LTERM); my_ws->edge_buffer = gk_malloc(nedges * sizeof(*my_ws->edge_buffer), "edge_buffer"); } } else { if(my_ws->len_edge_buffer2 < nedges) { my_ws->len_edge_buffer2 = nedges; gk_free((void **) &(my_ws->edge_buffer2), LTERM); my_ws->edge_buffer2 = gk_malloc(nedges * 
sizeof(*my_ws->edge_buffer2), "edge_buffer2"); } } int64_t * const restrict buffer = (which == 0) ? \ my_ws->edge_buffer : my_ws->edge_buffer2; if(which == 0) { /* go over each thread's bucket */ int32_t buffer_ptr = 0; for(int t=0; t < my_ws->num_threads; ++t) { /* copy incoming bucket */ int64_t const num_msgs = ws[t]->buckets[bucket_id].num_edge_msgs; int64_t const * const msgs = ws[t]->buckets[bucket_id].edge_msgs; for(int64_t m=0; m < num_msgs; ++m) { buffer[buffer_ptr++] = msgs[m]; } /* now empty buffer */ ws[t]->buckets[bucket_id].num_edge_msgs = 0; } } else { /* go over each thread's bucket */ int32_t buffer_ptr = 0; for(int t=0; t < my_ws->num_threads; ++t) { /* copy incoming bucket */ int64_t const num_msgs = ws[t]->buckets[bucket_id].num_edge_msgs2; int64_t const * const msgs = ws[t]->buckets[bucket_id].edge_msgs2; for(int64_t m=0; m < num_msgs; ++m) { buffer[buffer_ptr++] = msgs[m]; } /* now empty buffer */ ws[t]->buckets[bucket_id].num_edge_msgs2 = 0; } } *num_edges = nedges; return buffer; } void send_thread_tri_msg( triangle_t const * const triangle, int const bucket_dest, thread_ws * ws) { assert(bucket_dest >= 0); assert(bucket_dest < ws->num_buckets); assert(triangle->u < triangle->v); assert(triangle->u < triangle->w); assert(triangle->v < triangle->w); assert(triangle->u >= 0); assert(triangle->v >= 0); assert(triangle->w >= 0); thread_msg * const restrict dest_msgs = &(ws->buckets[bucket_dest]); /* resize if necessary */ if(dest_msgs->num_tri_msgs == dest_msgs->max_tri_msgs) { dest_msgs->max_tri_msgs *= 2; triangle_t * big = gk_malloc(dest_msgs->max_tri_msgs * sizeof(*big), "big"); for(int32_t i=0; i < dest_msgs->num_tri_msgs; ++i) { big[i] = dest_msgs->tri_msgs[i]; } gk_free((void **) &(dest_msgs->tri_msgs), LTERM); dest_msgs->tri_msgs = big; } /* append triangle to message queue */ int32_t const idx = dest_msgs->num_tri_msgs; dest_msgs->tri_msgs[idx] = *triangle; ++(dest_msgs->num_tri_msgs); #if VERBOSE printf("TRIANGLE thread %d -> %d (%d %d %d) = %zd %zd %zd\n", omp_get_thread_num(), bucket_dest, 1+triangle->u, 1+triangle->v, 1+triangle->w, triangle->uv, triangle->vw); #endif } void send_thread_pair_msg( triangle_t const * const triangle, int const bucket_dest, thread_ws * ws) { assert(triangle->v > 0); assert(triangle->w > 0); assert(triangle->v < triangle->w); pair_t tmp; tmp.v = triangle->v; tmp.w = triangle->w; tmp.vw = triangle->vw; send_thread_pair_msg_explicit(&tmp, bucket_dest, ws); } void send_thread_pair_msg_explicit( pair_t const * const pair, int const bucket_dest, thread_ws * ws) { assert(bucket_dest >= 0); assert(bucket_dest < ws->num_buckets); thread_msg * const restrict dest_msgs = &(ws->buckets[bucket_dest]); /* resize if necessary */ if(dest_msgs->num_pair_msgs == dest_msgs->max_pair_msgs) { dest_msgs->max_pair_msgs *= 2; pair_t * big = gk_malloc(dest_msgs->max_pair_msgs * sizeof(*big), "big"); for(int32_t i=0; i < dest_msgs->num_pair_msgs; ++i) { big[i] = dest_msgs->pair_msgs[i]; } gk_free((void **) &(dest_msgs->pair_msgs), LTERM); dest_msgs->pair_msgs = big; } /* append pair to message queue */ int32_t const idx = dest_msgs->num_pair_msgs; dest_msgs->pair_msgs[idx] = *pair; ++(dest_msgs->num_pair_msgs); #if VERBOSE printf("PAIR thread %d -> %d (%d %d)\n", omp_get_thread_num(), bucket_dest, pair->v, pair->w); #endif } void send_thread_edge_msg( int64_t edge_id, int const bucket_dest, thread_ws * ws, int const which) { assert(bucket_dest >= 0); assert(bucket_dest < ws->num_buckets); thread_msg * const restrict dest_msgs = 
&(ws->buckets[bucket_dest]); /* resize if necessary */ if(which == 0) { if(dest_msgs->num_edge_msgs == dest_msgs->max_edge_msgs) { dest_msgs->max_edge_msgs *= 2; int64_t * big = gk_malloc(dest_msgs->max_edge_msgs * sizeof(*big), "big"); for(int32_t i=0; i < dest_msgs->num_edge_msgs; ++i) { big[i] = dest_msgs->edge_msgs[i]; } gk_free((void **) &(dest_msgs->edge_msgs), LTERM); dest_msgs->edge_msgs = big; } /* append pair to message queue */ int32_t const idx = dest_msgs->num_edge_msgs; dest_msgs->edge_msgs[idx] = edge_id; ++(dest_msgs->num_edge_msgs); } else { if(dest_msgs->num_edge_msgs2 == dest_msgs->max_edge_msgs2) { dest_msgs->max_edge_msgs2 *= 2; int64_t * big = gk_malloc(dest_msgs->max_edge_msgs2 * sizeof(*big), "big"); for(int32_t i=0; i < dest_msgs->num_edge_msgs2; ++i) { big[i] = dest_msgs->edge_msgs2[i]; } gk_free((void **) &(dest_msgs->edge_msgs2), LTERM); dest_msgs->edge_msgs2 = big; } /* append pair to message queue */ int32_t const idx = dest_msgs->num_edge_msgs2; dest_msgs->edge_msgs2[idx] = edge_id; ++(dest_msgs->num_edge_msgs2); } #if VERBOSE printf("EDGE thread-%d -> bucket-%d edge-%zd \n", omp_get_thread_num(), bucket_dest, edge_id); #endif } thread_ws * * alloc_thread_ws( gk_graph_t const * const lgraph, gk_graph_t const * const ugraph) { int const nthreads = omp_get_max_threads(); int64_t const nedges = ugraph->xadj[ugraph->nvtxs]; ssize_t const max_degree = gk_max(graph_max_degree(lgraph), graph_max_degree(ugraph)); int32_t const max_support = max_elem(ugraph->iadjwgt, nedges); printf("max_support: %d\n\n", max_support); thread_ws * * ws = gk_malloc(nthreads * sizeof(*ws), "ws"); /* each thread has outgoing buffers for all threads */ #pragma omp parallel { thread_ws * my_ws = gk_malloc(sizeof(*my_ws), "ws[tid]"); my_ws->num_threads = nthreads; my_ws->num_buckets = nthreads * KT_BUCKETS_PER_THREAD; my_ws->buckets = gk_malloc(my_ws->num_buckets * sizeof(*(my_ws->buckets)), "messages"); /* allocate message to each thread */ for(int b=0; b < my_ws->num_buckets; ++b) { my_ws->buckets[b].max_tri_msgs = MSG_DEFAULT_BUFFER; my_ws->buckets[b].num_tri_msgs = 0; my_ws->buckets[b].tri_msgs = gk_malloc(MSG_DEFAULT_BUFFER * sizeof(triangle_t), "triangles"); my_ws->buckets[b].max_pair_msgs = MSG_DEFAULT_BUFFER; my_ws->buckets[b].num_pair_msgs = 0; my_ws->buckets[b].pair_msgs = gk_malloc(MSG_DEFAULT_BUFFER * sizeof(pair_t), "pairs"); my_ws->buckets[b].max_edge_msgs = MSG_DEFAULT_BUFFER; my_ws->buckets[b].num_edge_msgs = 0; my_ws->buckets[b].edge_msgs = gk_malloc(MSG_DEFAULT_BUFFER * sizeof(int64_t), "edges"); } /* buffer for finding triangles */ my_ws->w_ids = gk_malloc(max_support * sizeof(*my_ws->w_ids), "w_ids"); my_ws->uw_idxs = gk_malloc(max_support * sizeof(*my_ws->uw_idxs), "uw_idxs"); my_ws->vw_idxs = gk_malloc(max_support * sizeof(*my_ws->vw_idxs), "vw_idxs"); #if USE_HMAP my_ws->hmap = alloc_hmap(max_degree); #endif /* initially empty buffers */ my_ws->tri_buffer = NULL; my_ws->pair_buffer = NULL; my_ws->edge_buffer = NULL; my_ws->edge_buffer2 = NULL; my_ws->len_tri_buffer = 0; my_ws->len_pair_buffer = 0; my_ws->len_edge_buffer = 0; my_ws->len_edge_buffer = 0; my_ws->len_edge_buffer2 = 0; /* store my workspace */ ws[omp_get_thread_num()] = my_ws; } /* end omp parallel */ return ws; } thread_ws * * alloc_thread_ws_big( gk_graph_t const * const graph) { int const nthreads = omp_get_max_threads(); int64_t const nedges = graph->xadj[graph->nvtxs]; ssize_t const max_degree = graph_max_degree(graph); thread_ws * * ws = gk_malloc(nthreads * sizeof(*ws), "ws"); /* each thread has 
outgoing buffers for all threads */ #pragma omp parallel { thread_ws * my_ws = gk_malloc(sizeof(*my_ws), "ws[tid]"); my_ws->num_threads = nthreads; my_ws->num_buckets = nthreads * KT_BUCKETS_PER_THREAD; my_ws->buckets = gk_malloc(my_ws->num_buckets * sizeof(*(my_ws->buckets)), "messages"); /* allocate message to each thread */ for(int b=0; b < my_ws->num_buckets; ++b) { my_ws->buckets[b].max_tri_msgs = MSG_DEFAULT_BUFFER; my_ws->buckets[b].num_tri_msgs = 0; my_ws->buckets[b].tri_msgs = gk_malloc(MSG_DEFAULT_BUFFER * sizeof(triangle_t), "triangles"); my_ws->buckets[b].max_pair_msgs = MSG_DEFAULT_BUFFER; my_ws->buckets[b].num_pair_msgs = 0; my_ws->buckets[b].pair_msgs = gk_malloc(MSG_DEFAULT_BUFFER * sizeof(pair_t), "pairs"); my_ws->buckets[b].max_epair_msgs = MSG_DEFAULT_BUFFER; my_ws->buckets[b].num_epair_msgs = 0; my_ws->buckets[b].epair_msgs = gk_malloc(MSG_DEFAULT_BUFFER * sizeof(pair_t), "pairs"); my_ws->buckets[b].max_edge_msgs = MSG_DEFAULT_BUFFER; my_ws->buckets[b].num_edge_msgs = 0; my_ws->buckets[b].edge_msgs = gk_malloc(MSG_DEFAULT_BUFFER * sizeof(int64_t), "edges"); my_ws->buckets[b].max_edge_msgs2 = MSG_DEFAULT_BUFFER; my_ws->buckets[b].num_edge_msgs2 = 0; my_ws->buckets[b].edge_msgs2 = gk_malloc(MSG_DEFAULT_BUFFER * sizeof(int64_t), "edges2"); } /* buffer for finding triangles */ my_ws->w_ids = NULL; my_ws->uw_idxs = NULL; my_ws->vw_idxs = NULL; /* initially empty buffers */ my_ws->tri_buffer = NULL; my_ws->pair_buffer = NULL; my_ws->epair_buffer = NULL; my_ws->edge_buffer = NULL; my_ws->edge_buffer2 = NULL; my_ws->len_tri_buffer = 0; my_ws->len_pair_buffer = 0; my_ws->len_epair_buffer = 0; my_ws->len_edge_buffer = 0; my_ws->len_edge_buffer2 = 0; /* store my workspace */ ws[omp_get_thread_num()] = my_ws; } /* end omp parallel */ return ws; } void send_thread_epair_msg( epair_t * const pair, int const bucket_dest, thread_ws * ws) { assert(bucket_dest >= 0); assert(bucket_dest < ws->num_buckets); thread_msg * const restrict dest_msgs = &(ws->buckets[bucket_dest]); /* resize if necessary */ if(dest_msgs->num_epair_msgs == dest_msgs->max_epair_msgs) { dest_msgs->max_epair_msgs *= 2; epair_t * big = gk_malloc(dest_msgs->max_epair_msgs * sizeof(*big), "big"); for(int32_t i=0; i < dest_msgs->num_epair_msgs; ++i) { big[i] = dest_msgs->epair_msgs[i]; } gk_free((void **) &(dest_msgs->epair_msgs), LTERM); dest_msgs->epair_msgs = big; } /* append epair to message queue */ int32_t const idx = dest_msgs->num_epair_msgs; dest_msgs->epair_msgs[idx] = *pair; ++(dest_msgs->num_epair_msgs); #if VERBOSE printf("PAIR thread %d -> %d (%d %d)\n", omp_get_thread_num(), bucket_dest, pair->v, pair->w); #endif } epair_t * get_incoming_epair_bucket( thread_ws * * ws, int const bucket_id, int64_t * num_pairs) { int const tid = omp_get_thread_num(); thread_ws * const my_ws = ws[tid]; int32_t npairs = 0; /* Count the number of pairs in bucket (across all threads) */ for(int t=0; t < my_ws->num_threads; ++t) { npairs += ws[t]->buckets[bucket_id].num_epair_msgs; } /* resize buffer if necessary */ if(my_ws->len_epair_buffer < npairs) { my_ws->len_epair_buffer = npairs; gk_free((void **) &(my_ws->epair_buffer), LTERM); my_ws->epair_buffer = gk_malloc(npairs * sizeof(*my_ws->epair_buffer), "epair_buffer"); } epair_t * const restrict buffer = my_ws->epair_buffer; /* go over each thread's bucket */ int32_t buffer_ptr = 0; for(int t=0; t < my_ws->num_threads; ++t) { /* copy incoming bucket */ int32_t const num_msgs = ws[t]->buckets[bucket_id].num_epair_msgs; if(num_msgs > 0) { epair_t const * const msgs = 
ws[t]->buckets[bucket_id].epair_msgs; for(int32_t m=0; m < num_msgs; ++m) { buffer[buffer_ptr++] = msgs[m]; } /* now empty buffer */ ws[t]->buckets[bucket_id].num_epair_msgs = 0; } } *num_pairs = npairs; return buffer; } void free_thread_ws( thread_ws * * ws) { #pragma omp parallel { int const tid = omp_get_thread_num(); for(int b=0; b < ws[tid]->num_buckets; ++b) { gk_free((void **) &(ws[tid]->buckets[b].tri_msgs), LTERM); gk_free((void **) &(ws[tid]->buckets[b].pair_msgs), LTERM); gk_free((void **) &(ws[tid]->buckets[b].edge_msgs), LTERM); gk_free((void **) &(ws[tid]->buckets[b].edge_msgs2), LTERM); } #if USE_HMAP free_hmap(ws[tid]->hmap); #endif gk_free((void **) &(ws[tid]->w_ids), LTERM); gk_free((void **) &(ws[tid]->uw_idxs), LTERM); gk_free((void **) &(ws[tid]->vw_idxs), LTERM); gk_free((void **) &(ws[tid]->tri_buffer), LTERM); gk_free((void **) &(ws[tid]->pair_buffer), LTERM); gk_free((void **) &(ws[tid]->edge_buffer), LTERM); gk_free((void **) &(ws[tid]->buckets), LTERM); gk_free((void **) &(ws[tid]), LTERM); } gk_free((void **) &(ws), LTERM); } void thread_time_stats( double const * const restrict thread_times, int const num_threads, int const padding) { double total_thread_time = 0.; double min_time = thread_times[0]; double max_time = thread_times[0]; for(int t=0; t < num_threads; ++t) { double const curr_time = thread_times[t * padding]; total_thread_time += curr_time; min_time = gk_min(curr_time, min_time); max_time = gk_max(curr_time, max_time); } double const avg_time = total_thread_time / num_threads; printf("\tmin: %0.3fs avg: %0.3fs max: %0.3fs imbalance(max/avg): %0.2fx\n", min_time, avg_time, max_time, max_time / avg_time); }
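/* Editor's sketch (not part of this file): the intended exchange pattern for
 * the buffers above -- every thread first appends into per-bucket outgoing
 * queues, all threads synchronize, then each thread drains one bucket across
 * all senders. Assumes it is called by every thread of an enclosing
 * "#pragma omp parallel" region. */
#ifdef KT_THREAD_USAGE_SKETCH
void exchange_one_bucket(thread_ws * * ws, triangle_t const * t, int bucket)
{
  send_thread_tri_msg(t, bucket, ws[omp_get_thread_num()]);

  #pragma omp barrier

  int32_t ntri;
  triangle_t * incoming = get_incoming_triangle_bucket(ws, bucket, &ntri);
  for(int32_t i = 0; i < ntri; ++i) {
    (void) incoming[i]; /* consume incoming[i] here */
  }
}
#endif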
syrk.c
/**
 * syrk.c: This file was adapted from PolyBench/GPU 1.0 test suite
 * to run on GPU with OpenMP 4.0 pragmas and OpenCL driver.
 *
 * http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
 *
 * Contacts: Marcio M Pereira <mpereira@ic.unicamp.br>
 *           Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br>
 *           Luís Felipe Mattos <ra107822@students.ic.unicamp.br>
 */

#include "BenchmarksUtil.h"
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>

/* Problem size */
#define N SIZE
#define M SIZE

/* Declared constant values for alpha and beta */
/* (same as values in PolyBench 2.0) */
#define alpha 12435
#define beta 4546

/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;

void init_array_A(DATA_TYPE *A) {
  int i, j;
  for (i = 0; i < N; i++) {
    for (j = 0; j < M; j++) {
      A[i * M + j] = ((DATA_TYPE)i * j) / N;
    }
  }
}

void init_array_C(DATA_TYPE *C) {
  int i, j;
  for (i = 0; i < N; i++) {
    for (j = 0; j < M; j++) {
      C[i * M + j] = ((DATA_TYPE)i * j + 2) / N;
    }
  }
}

int compareResults(DATA_TYPE *C, DATA_TYPE *C_OMP) {
  int i, j, fail;
  fail = 0;

  // Compare C with D
  for (i = 0; i < N; i++) {
    for (j = 0; j < M; j++) {
      if (percentDiff(C[i * M + j], C_OMP[i * M + j]) > ERROR_THRESHOLD) {
        fail++;
      }
    }
  }
  return fail;
}

void syrk(DATA_TYPE *A, DATA_TYPE *C) {
  int i, j, k;

  for (i = 0; i < N; i++) {
    for (j = 0; j < M; j++) {
      C[i * M + j] *= beta;
    }
  }

  /* N == M (both SIZE), so M is used consistently as the row stride */
  for (i = 0; i < N; i++) {
    for (j = 0; j < M; j++) {
      for (k = 0; k < M; k++) {
        C[i * M + j] += alpha * A[i * M + k] * A[j * M + k];
      }
    }
  }
}

void syrkOMP(DATA_TYPE *A, DATA_TYPE *C) {
#pragma omp target teams map(to : A[:N*M]) map(tofrom : C[:N*M]) device(OMP_DEVICE_ID)
  {
#pragma omp distribute parallel for collapse(2)
    for (int i = 0; i < N; i++) {
      for (int j = 0; j < M; j++) {
        C[i * M + j] *= beta;
      }
    }

#pragma omp distribute parallel for collapse(2)
    for (int i = 0; i < N; i++) {
      for (int j = 0; j < M; j++) {
        for (int k = 0; k < M; k++) {
          C[i * M + j] += alpha * A[i * M + k] * A[j * M + k];
        }
      }
    }
  }
}

int main() {
  fprintf(stdout, "<< Symmetric rank-k operations >>\n");

  // declare arrays and allocate memory for common arrays
  DATA_TYPE *A = (DATA_TYPE *) malloc(N * M * sizeof(DATA_TYPE));
  DATA_TYPE *C = NULL;
  DATA_TYPE *C_OMP = NULL;

  // init array A
  init_array_A(A);

  // run OMP on GPU or CPU if enabled
#if defined(RUN_OMP_GPU) || defined(RUN_OMP_CPU)
  C_OMP = (DATA_TYPE *) malloc(N * M * sizeof(DATA_TYPE));
  init_array_C(C_OMP);

  BENCHMARK_OMP(syrkOMP(A, C_OMP));

  // prevent dead-code elimination
  DCE_PREVENT(C_OMP, N*M);
#endif

  // run sequential version if enabled
#ifdef RUN_CPU_SEQ
  C = (DATA_TYPE *) malloc(N * M * sizeof(DATA_TYPE));
  init_array_C(C);

  BENCHMARK_CPU(syrk(A, C));

  // prevent dead-code elimination
  DCE_PREVENT(C, N*M);
#endif

  int fail = 0;
  // if TEST is enabled, then compare OMP results against sequential mode
#ifdef RUN_TEST
  fail = compareResults(C, C_OMP);
  printf("Errors on OMP (threshold %4.2lf): %d\n", ERROR_THRESHOLD, fail);
#endif

  // release memory
  free(A);
  free(C);
  free(C_OMP);

  return fail;
}
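/* Editor's sketch (not in the original benchmark): the SYRK update is
 * symmetric, so a variant can compute only the lower triangle and mirror it,
 * roughly halving the flops. Valid here because init_array_C produces a
 * symmetric C (i*j is symmetric in i and j); for a general C this mirroring
 * assumption would not hold. */
#ifdef SYRK_TRIANGULAR_SKETCH
void syrkTriangular(DATA_TYPE *A, DATA_TYPE *C) {
  for (int i = 0; i < N; i++) {
    for (int j = 0; j <= i; j++) {
      DATA_TYPE acc = beta * C[i * M + j];
      for (int k = 0; k < M; k++) {
        acc += alpha * A[i * M + k] * A[j * M + k];
      }
      C[i * M + j] = acc;
      C[j * M + i] = acc; /* mirror; diagonal is written twice, harmlessly */
    }
  }
}
#endif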
normal.c
// RUN: %libomp-compile-and-run | FileCheck %s
// RUN: %libomp-compile-and-run | %sort-threads \
// RUN: | FileCheck --check-prefix=THREADS %s
// REQUIRES: ompt

#include "callback.h"

int main() {
#pragma omp parallel num_threads(4)
  {
    print_ids(0);
    print_ids(1);
  }
  print_fuzzy_address(1);

  // Check if libomp supports the callbacks for this test.
  // CHECK-NOT: {{^}}0: Could not register callback

  // CHECK: 0: NULL_POINTER=[[NULL:.*$]]

  // make sure initial data pointers are null
  // CHECK-NOT: 0: parallel_data initially not null
  // CHECK-NOT: 0: task_data initially not null
  // CHECK-NOT: 0: thread_data initially not null

  // Only check callback names, arguments are verified in THREADS below.
  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin

  // CHECK-DAG: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin
  // CHECK-DAG: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end

  // Note that we cannot ensure that the worker threads have already called
  // barrier_end and implicit_task_end before parallel_end!

  // CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin
  // CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin

  // CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin
  // CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin

  // CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin
  // CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin

  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_end

  // THREADS: 0: NULL_POINTER=[[NULL:.*$]]
  // THREADS: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_thread_begin
  // THREADS-SAME: thread_type=ompt_thread_initial=1, thread_id=[[MASTER_ID]]
  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin
  // THREADS-SAME: parent_task_id=[[PARENT_TASK_ID:[0-9]+]]
  // THREADS-SAME: parent_task_frame.exit=[[NULL]]
  // THREADS-SAME: parent_task_frame.reenter={{0x[0-f]+}}
  // THREADS-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=4
  // THREADS-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}

  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin
  // THREADS-SAME: parallel_id=[[PARALLEL_ID]]
  // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // THREADS: {{^}}[[MASTER_ID]]: task level 0
  // THREADS-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[MASTER_ID]]: task level 1
  // THREADS-SAME: parallel_id=[[IMPLICIT_PARALLEL_ID:[0-9]+]]
  // THREADS-SAME: task_id=[[PARENT_TASK_ID]]
  // THREADS-NOT: ompt_event_implicit_task_end
  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin
  // THREADS-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS-SAME: codeptr_ra=[[RETURN_ADDRESS]]{{[0-f][0-f]}}
  // THREADS: {{^}}[[MASTER_ID]]: task level 0
  // THREADS-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS-SAME: exit_frame=[[NULL]], reenter_frame=[[NULL]]
  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_end
  // parallel_id is 0 because the region ended in the barrier!
  // THREADS-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]]
  // THREADS-SAME: codeptr_ra=[[RETURN_ADDRESS]]{{[0-f][0-f]}}
  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end
  // THREADS-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]]

  // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_thread_begin
  // THREADS-SAME: thread_type=ompt_thread_worker=2, thread_id=[[THREAD_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin
  // THREADS-SAME: parallel_id=[[PARALLEL_ID]]
  // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 0
  // THREADS-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 1
  // THREADS-SAME: parallel_id=[[IMPLICIT_PARALLEL_ID]]
  // THREADS-SAME: task_id=[[PARENT_TASK_ID]]
  // THREADS-NOT: ompt_event_implicit_task_end
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin
  // THREADS-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end
  // parallel_id is 0 because the region ended in the barrier!
  // THREADS-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
  // THREADS-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]]

  // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_thread_begin
  // THREADS-SAME: thread_type=ompt_thread_worker=2, thread_id=[[THREAD_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin
  // THREADS-SAME: parallel_id=[[PARALLEL_ID]]
  // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 0
  // THREADS-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 1
  // THREADS-SAME: parallel_id=[[IMPLICIT_PARALLEL_ID]]
  // THREADS-SAME: task_id=[[PARENT_TASK_ID]]
  // THREADS-NOT: ompt_event_implicit_task_end
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin
  // THREADS-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end
  // parallel_id is 0 because the region ended in the barrier!
  // THREADS-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
  // THREADS-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]]

  // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_thread_begin
  // THREADS-SAME: thread_type=ompt_thread_worker=2, thread_id=[[THREAD_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin
  // THREADS-SAME: parallel_id=[[PARALLEL_ID]]
  // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 0
  // THREADS-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 1
  // THREADS-SAME: parallel_id=[[IMPLICIT_PARALLEL_ID]]
  // THREADS-SAME: task_id=[[PARENT_TASK_ID]]
  // THREADS-NOT: ompt_event_implicit_task_end
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin
  // THREADS-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end
  // parallel_id is 0 because the region ended in the barrier!
  // THREADS-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
  // THREADS-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]]

  return 0;
}
pr72781.c
/* PR middle-end/72781 */
/* { dg-do compile } */
/* { dg-additional-options "-O2 -Wuninitialized" } */

int u;

void
foo (int *p)
{
  int i;
  #pragma omp for simd lastprivate(u) schedule (static, 32)	/* { dg-bogus "may be used uninitialized in this function" } */
  for (i = 0; i < 1024; i++)
    u = p[i];
}

void
bar (int *p)
{
  int i;
  #pragma omp taskloop simd lastprivate(u)	/* { dg-bogus "may be used uninitialized in this function" } */
  for (i = 0; i < 1024; i++)
    u = p[i];
}
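/* Editor's note (not part of the GCC testcase): with lastprivate(u), each
   thread/lane gets a private copy of "u" that is written on every iteration
   and copied back from the last one, so nothing here actually reads "u"
   uninitialized. PR middle-end/72781 tracked a spurious -Wuninitialized
   warning that the OpenMP lowering provoked on such loops; the dg-bogus
   annotations assert that the bogus warning does not come back. */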
3d7pt.c
/*
 * Order-1, 3D 7 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#ifdef _OPENMP
#include <omp.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive.
   */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx = 0, Ny = 0, Nz = 0, Nt = 0;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  /* guard against using the sizes uninitialized */
  if (Nx <= 0 || Nt <= 0) {
    fprintf(stderr, "usage: %s Nx Ny Nz Nt\n", argv[0]);
    return 1;
  }

  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 32;
  tile_size[1] = 32;
  tile_size[2] = 32;
  tile_size[3] = 64;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;

  // initialize variables
  // srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
              + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k]
                      + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k]
                      + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
          }
        }
      }
    }
#pragma endscop

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (Causing performance degradation
  /*
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  */
  return 0;
}
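/* Editor's sketch (not in the original): with double buffering, all points of
 * one time step are independent, so the space loops can be parallelized
 * directly; only the t loop carries a dependence. Same A layout as above. */
#ifdef STENCIL_OMP_SKETCH
static void sweep_omp(double ****A, int t, int Nz, int Ny, int Nx,
                      double alpha, double beta)
{
#pragma omp parallel for collapse(2)
  for (int i = 1; i < Nz-1; i++) {
    for (int j = 1; j < Ny-1; j++) {
      for (int k = 1; k < Nx-1; k++) {
        A[(t+1)%2][i][j][k] = alpha * A[t%2][i][j][k]
          + beta * (A[t%2][i-1][j][k] + A[t%2][i][j-1][k] + A[t%2][i][j][k-1]
                  + A[t%2][i+1][j][k] + A[t%2][i][j+1][k] + A[t%2][i][j][k+1]);
      }
    }
  }
}
#endif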
resource_strings.h
#pragma once

#include <torch/csrc/jit/frontend/code_template.h>

namespace torch {
namespace jit {
namespace fuser {
namespace cpu {

/*with type_as not checking type of its input, a fusion group can have non-fp32
tensor as input. Correct code for this case is generated, however, nvrtc does
not know how to handle int*_t integer types, so typedefs help it handle those
cases*/

static auto type_declarations_template = CodeTemplate(R"(
#define POS_INFINITY INFINITY
#define NEG_INFINITY -INFINITY

typedef ${IndexType} IndexType;
template<typename T, size_t N>
struct TensorInfo {
  T* data;
  IndexType sizes[N];
  IndexType strides[N];
};
template<typename T>
struct TensorInfo<T, 0> {
  T * data;
};
)");

static auto cpu_compilation_unit_template = CodeTemplate(R"(
#include <math.h>
#include <cstddef>
#include <cstdint>

double rsqrt(double x) {
  return 1.0/sqrt(x);
}

float rsqrtf(float x) {
  return 1.0f/sqrtf(x);
}

double frac(double x) {
  return x - trunc(x);
}

float fracf(float x) {
  return x - truncf(x);
}

${type_declarations}

#ifdef _MSC_VER
template<size_t n> struct int_of_size;

#define DEFINE_INT_OF_SIZE(int_t) \
template<> struct int_of_size<sizeof(int_t)> { using type = int_t; }

DEFINE_INT_OF_SIZE(int64_t);
DEFINE_INT_OF_SIZE(int32_t);
DEFINE_INT_OF_SIZE(int16_t);
DEFINE_INT_OF_SIZE(int8_t);

#undef DEFINE_INT_OF_SIZE

template <typename T>
using int_same_size_t = typename int_of_size<sizeof(T)>::type;

#define IndexTypeLoop int_same_size_t<IndexType>
#define ToIndexTypeLoop(x) static_cast<IndexTypeLoop>(x)
#else
#define IndexTypeLoop IndexType
#define ToIndexTypeLoop(x) x
#endif

#define OMP_THRESHOLD 100000
static void ${kernelName}_kernel(IndexType totalElements, ${formals}) {
  #pragma omp parallel for if(totalElements > OMP_THRESHOLD)
  for (IndexTypeLoop linearIndex = 0;
        linearIndex < ToIndexTypeLoop(totalElements);
        linearIndex += 1) {
    // Convert `linearIndex` into an offset of tensor:
    ${tensorOffsets}
    // calculate the results
    ${kernelBody}
  }
}

#ifdef _WIN32
#define JIT_API __declspec(dllexport)
#else
#define JIT_API
#endif

extern "C"
JIT_API void ${kernelName}(IndexType totalElements, void ** args) {
  ${kernelName}_kernel(totalElements ${,argument_loads});
}
)");

} // namespace cpu
} // namespace fuser
} // namespace jit
} // namespace torch
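// Editor's note (not part of the upstream header): the emitted kernel guards
// its parallel loop with "if(totalElements > OMP_THRESHOLD)" so that small
// tensors run serially and skip the fork/join overhead. The same pattern in
// a standalone sketch (the threshold value mirrors the template's 100000 and
// is otherwise an arbitrary choice):
//
//   static void scale(float* out, const float* in, int64_t n) {
//     #pragma omp parallel for if (n > 100000)
//     for (int64_t i = 0; i < n; ++i)
//       out[i] = 2.0f * in[i];  // trivially data-parallel body
//   }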
omp-not-thrdprvt.c
#include <stdio.h>
#include <omp.h> /* declares omp_get_thread_num() */

int x, y;

int main()
{
  #pragma omp parallel
  {
    x = omp_get_thread_num();
  }

  #pragma omp parallel
  {
    if(x % 2 == 0)
      y = x + 1;
    else
      y = 0;
  }

  #pragma omp parallel
  {
    printf("%d, %d %d\n", x, y, omp_get_thread_num());
  }

  #pragma omp parallel
  {
    printf("%d, %d %d\n", x, y, omp_get_thread_num());
  }
}
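/* Editor's sketch (not in the original): the file name contrasts this racy
 * version with a threadprivate one, where x and y become per-thread copies
 * and each thread prints its own consistent pair. Per-thread values persist
 * between regions only if dynamic thread adjustment is off and the team size
 * is unchanged. */
#ifdef THREADPRIVATE_SKETCH
int tx, ty;
#pragma omp threadprivate(tx, ty)
void demo(void)
{
  #pragma omp parallel
  {
    tx = omp_get_thread_num();
    ty = (tx % 2 == 0) ? tx + 1 : 0;
  }
  #pragma omp parallel
  {
    printf("%d, %d %d\n", tx, ty, omp_get_thread_num());
  }
}
#endif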
cmuselike.c
/***
Likelihood implementation in C
--------------------------------

Copyright (c) 2017 Johannes Buchner

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***/

#include<stdbool.h>
#include<stdio.h>
#include<stdlib.h>
#include<assert.h>
#include<math.h>
#ifdef PARALLEL
#include<omp.h>
#endif

#define IFVERBOSE if(0)
#define IFDEBUG if(0)
#define adouble double
#define bdouble double
#define sqr(x) (pow(x,2))

// Parallelisation does not work at the moment, you are welcome to fix it

// ret = lib.like(yd, vd, ypred, data_mask, ndata, nspec, Lout)
int like(
	const void * yyp, const void * vvp, const void * ypredp,
	const void * data_maskp,
	const int ndata, const int nx,
	void * Loutp
) {
	const adouble * yy = (const adouble*) yyp;
	const adouble * vv = (const adouble*) vvp;
	const adouble * ypred = (const adouble*) ypredp;
	const bool * data_mask = (const bool*) data_maskp;
	adouble * Lout = (adouble*) Loutp;

#ifdef PARALLEL
#pragma omp parallel for
#endif
	for (int i = 0; i < ndata; i++) {
		if (data_mask[i]) {
			// compute s
			double s1 = 0.;
			double s2 = 1e-10;
			for (int j = 0; j < nx; j++) {
				s1 += yy[i+j*ndata] * ypred[j] / vv[i+j*ndata];
				s2 += pow(ypred[j], 2) / vv[i+j*ndata];
			}
			double s = s1/s2;
			double chi = 0.;
			for (int j = 0; j < nx; j++) {
				chi += pow(yy[i+j*ndata] - s * ypred[j], 2) / vv[i+j*ndata];
			}
			Lout[i] = -0.5 * chi;
		}
	}
	return 0;
}
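/* Editor's note (not part of the original): as written, each iteration of the
 * parallel loop writes only its own Lout[i] and reads shared, read-only
 * inputs, so the "#pragma omp parallel for" has no data race in the loop
 * itself; the "Parallelisation does not work" remark above presumably refers
 * to something outside this function. A minimal C call sketch with made-up
 * sizes (ndata = 2 rows, nx = 3 spectral components):
 *
 *   double yy[6] = {1,2,3,4,5,6}, vv[6] = {1,1,1,1,1,1};
 *   double ypred[3] = {1,1,1}, Lout[2];
 *   bool mask[2] = {true, true};
 *   like(yy, vv, ypred, mask, 2, 3, Lout);
 */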
gen2DTorus.c
#include "defs.h" #define PARALLEL_SDG /* Set this variable to zero to run the data generator on one thread (for debugging purposes) */ double gen2DTorus(graphSDG* SDGdata) { VERT_T *src, *dest; WEIGHT_T *wt; #ifdef _OPENMP omp_lock_t* vLock; #endif double elapsed_time; int seed; elapsed_time = get_seconds(); /* allocate memory for edge tuples */ src = (VERT_T *) malloc(M*sizeof(VERT_T)); dest = (VERT_T *) malloc(M*sizeof(VERT_T)); wt = (WEIGHT_T *) malloc(M*sizeof(WEIGHT_T)); assert(src != NULL); assert(dest != NULL); assert(wt != NULL); /* sprng seed */ seed = 2387; #ifdef _OPENMP #ifdef PARALLEL_SDG omp_set_num_threads(omp_get_max_threads()); // omp_set_num_threads(16); #else omp_set_num_threads(1); #endif #endif #ifdef _OPENMP #pragma omp parallel #endif { int tid, nthreads; #ifdef DIAGNOSTIC double elapsed_time_part; #endif int *stream; LONG_T n, m; LONG_T i, j, x, y; LONG_T x_start, x_end, offset; LONG_T count; #ifdef _OPENMP nthreads = omp_get_num_threads(); tid = omp_get_thread_num(); #else nthreads = 1; tid = 0; #endif /* Initialize RNG stream */ stream = init_sprng(0, tid, nthreads, seed, SPRNG_DEFAULT); #ifdef DIAGNOSTIC if (tid == 0) elapsed_time_part = get_seconds(); #endif n = N; m = M; if (SCALE % 2 == 0) { x = 1<<(SCALE/2); y = 1<<(SCALE/2); } else { x = 1<<((SCALE+1)/2); y = 1<<((SCALE-1)/2); } count = 0; x_start = (x/nthreads)*tid; x_end = (x/nthreads)*(tid+1); if (tid == 0) x_start = 0; if (tid == nthreads-1) x_end = x; offset = 4*x_start*y; fprintf(stderr, "tid: %d, x_start: %d, x_end: %d, offset: %d\n", tid, x_start, x_end, offset); /* if (tid == 0) { */ for (i = x_start; i < x_end; i++) { for (j = 0; j < y; j++) { /* go down */ if (j > 0) { src[offset+count] = y*i + j; dest[offset+count] = y*i + j - 1; } else { src[offset+count] = y*i + j; dest[offset+count] = y*i + y - 1; } count++; /* go up */ if (j < y-1) { src[offset+count] = y*i + j; dest[offset+count] = y*i + j + 1; } else { src[offset+count] = y*i + j; dest[offset+count] = y*i; } count++; /* go left */ if (i > 0) { src[offset+count] = y*i + j; dest[offset+count] = y*(i-1) + j; } else { src[offset+count] = y*i + j; dest[offset+count] = y*(x-1) + j; } count++; /* go right */ if (i < x-1) { src[offset+count] = y*i + j; dest[offset+count] = y*(i+1) + j; } else { src[offset+count] = y*i + j; dest[offset+count] = j; } count++; } } /* } */ #ifdef _OPENMP #pragma omp barrier #endif #ifdef DIAGNOSTIC if (tid == 0) { elapsed_time_part = get_seconds() -elapsed_time_part; fprintf(stderr, "Tuple generation time: %lf seconds\n", elapsed_time_part); elapsed_time_part = get_seconds(); } #endif #ifdef _OPENMP #pragma omp barrier #pragma omp for #endif for (i=0; i<m; i++) { wt[i] = 1 + MaxIntWeight * sprng(stream); } #ifdef DIAGNOSTIC if (tid == 0) { elapsed_time_part = get_seconds() - elapsed_time_part; fprintf(stderr, "Generating edge weights: %lf seconds\n", elapsed_time_part); elapsed_time_part = get_seconds(); } #endif SDGdata->n = n; SDGdata->m = m; SDGdata->startVertex = src; SDGdata->endVertex = dest; SDGdata->weight = wt; #ifdef _OPENMP #endif } elapsed_time = get_seconds() - elapsed_time; return elapsed_time; }
GMS_hw_metrics_time_series_analysis.h
#ifndef __GMS_HW_METRICS_TIME_SERIES_ANALYSIS_H__
#define __GMS_HW_METRICS_TIME_SERIES_ANALYSIS_H__

#include <cstdint>
#include <algorithm>   // std::sort
#include <immintrin.h> // _mm_malloc/_mm_free
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "Timsac_iface.h"
#include "GMS_descriptive_statistics.hpp"
#include "GMS_convert_numeric_data_types.hpp"

// Fallback definition; the including project may supply its own.
#if !defined(MALLOC_FAILED)
#define MALLOC_FAILED \
    { printf("%s:%d -- _mm_malloc failure!!\n",__FILE__,__LINE__); \
      std::exit(EXIT_FAILURE); }
#endif

// The sx-prefixed members hold the descriptive-statistics results; the
// prefix keeps them from clashing with the Fortran-side locals (xmean,
// xvar) that most of the functions below declare separately.
#if !defined(DESCRIPTIVE_STATISTICS_DATA)
#define DESCRIPTIVE_STATISTICS_DATA \
float * __restrict a = NULL;    \
float * __restrict df32 = NULL; \
float w = 0.0f;                 \
float pw = 0.0f;                \
int32_t ifault = -1;            \
float srsd = 0.0f;              \
float svar = 0.0f;              \
float skew = 0.0f;              \
float kurt = 0.0f;              \
float autocor = 0.0f;           \
float sxmid = 0.0f;             \
float sxmean = 0.0f;            \
float sxmidm = 0.0f;            \
float sxmed = 0.0f;             \
float smin = 0.0f;              \
float smax = 0.0f;              \
float sxrange = 0.0f;           \
float sxsd = 0.0f;              \
float sxrelsd = 0.0f;           \
float sxvar = 0.0f;
#endif

/*
    Apply Time-Series analysis (Timsac) subroutine "CANARM".
    The data itself is invariant from the point of view of the
    specific subroutine, i.e. "CANARM".
    Attempt to calculate the descriptive statistics if the result
    of the Shapiro-Wilk normality test allows it.
*/
template<int32_t len, int32_t lagh>
__attribute__((hot))
__attribute__((aligned(32)))
void hw_perf_metrics_canarm(const double * __restrict __attribute__((aligned(64))) data,
                            const char * __restrict fname,
                            const char * __restrict metric_name) {

     static_assert(len <= 100000, "Input data length can not exceed -- **100000** elements!!");
     //const int32_t lagh = (int32_t)(std::sqrtf((int32_t)len));
     const int32_t len2 = len/2; // shapiro-wilk 'a' array length.
     const std::size_t lag2len = (std::size_t)(lagh*lagh);
     const std::size_t lag3len = (std::size_t)lag2len*len;
     constexpr float w_limit = 0.05f;
     __attribute__((aligned(64))) double acor[lagh] = {};
     __attribute__((aligned(64))) double acov[lagh] = {};
     __attribute__((aligned(64))) double xarcoef[lagh] = {};
     __attribute__((aligned(64))) double xv[lagh] = {};
     __attribute__((aligned(64))) double xaic[lagh] = {};
     __attribute__((aligned(64))) double xparcor[lagh] = {};
     __attribute__((aligned(64))) double xdicm[lagh] = {};
     __attribute__((aligned(64))) double xb[lagh] = {};
     __attribute__((aligned(64))) double xa[lagh] = {};
     __attribute__((aligned(64))) int32_t xm1[lagh] = {};
     __attribute__((aligned(64))) int32_t xm2[lagh] = {};
     __attribute__((aligned(64))) int32_t xpo[lagh] = {};
     double * __restrict xw = NULL;
     double * __restrict xz = NULL;
     double * __restrict xRs = NULL;
     double * __restrict xchi = NULL;
     int32_t * __restrict xndt = NULL;
     double * __restrict xdic = NULL;
     FILE * fptr = NULL;
     double xoaic = 0.0;
     double xmean = 0.0;
     int32_t xmo = 0;
     int32_t xnc = 0;
     int32_t xk = 0;
     int32_t xl = 0;
     DESCRIPTIVE_STATISTICS_DATA
     const bool init = false; // swilk init argument.
     // OpenMP multithreaded calls to _mm_malloc (using parallel sections)
     // Multithreaded allocation for large dynamic arrays.
     if(len > 10000) {
#pragma omp parallel sections
       {
            #pragma omp section
            {
                xw   = reinterpret_cast<double*>(_mm_malloc(lag3len*sizeof(double),64));
            }
            #pragma omp section
            {
                xz   = reinterpret_cast<double*>(_mm_malloc(lag2len*sizeof(double),64));
            }
            #pragma omp section
            {
                xRs  = reinterpret_cast<double*>(_mm_malloc(lag2len*sizeof(double),64));
            }
            #pragma omp section
            {
                xchi = reinterpret_cast<double*>(_mm_malloc(lag2len*sizeof(double),64));
            }
            #pragma omp section
            {
                xndt = reinterpret_cast<int32_t*>(_mm_malloc(lag2len*sizeof(int32_t),64));
            }
            #pragma omp section
            {
                xdic = reinterpret_cast<double*>(_mm_malloc(lag2len*sizeof(double),64));
            }
            #pragma omp section
            {
                a    = reinterpret_cast<float*>(_mm_malloc((std::size_t)len2*sizeof(float),64));
            }
            #pragma omp section
            {
                df32 = reinterpret_cast<float*>(_mm_malloc((std::size_t)len*sizeof(float),64));
            }
       }
       // Single thread checks the returned pointers!!
       const bool isnull = (NULL==a)   || (NULL==xdic) || (NULL==xndt) || (NULL==xchi) ||
                           (NULL==xRs) || (NULL==xz)   || (NULL==xw)   || (NULL==df32);
       if(__builtin_expect(isnull,0)) {MALLOC_FAILED}
     }
     else {
       // for if: len <= 10000
       xw   = reinterpret_cast<double*>(_mm_malloc(lag3len*sizeof(double),64));
       if(__builtin_expect(NULL==xw,0))   {MALLOC_FAILED}
       xz   = reinterpret_cast<double*>(_mm_malloc(lag2len*sizeof(double),64));
       if(__builtin_expect(NULL==xz,0))   {MALLOC_FAILED}
       xRs  = reinterpret_cast<double*>(_mm_malloc(lag2len*sizeof(double),64));
       if(__builtin_expect(NULL==xRs,0))  {MALLOC_FAILED}
       xchi = reinterpret_cast<double*>(_mm_malloc(lag2len*sizeof(double),64));
       if(__builtin_expect(NULL==xchi,0)) {MALLOC_FAILED}
       xndt = reinterpret_cast<int32_t*>(_mm_malloc(lag2len*sizeof(int32_t),64));
       if(__builtin_expect(NULL==xndt,0)) {MALLOC_FAILED}
       xdic = reinterpret_cast<double*>(_mm_malloc(lag2len*sizeof(double),64));
       if(__builtin_expect(NULL==xdic,0)) {MALLOC_FAILED}
       a    = reinterpret_cast<float*>(_mm_malloc((std::size_t)len2*sizeof(float),64));
       if(__builtin_expect(NULL==a,0))    {MALLOC_FAILED}
       df32 = reinterpret_cast<float*>(_mm_malloc((std::size_t)len*sizeof(float),64));
       if(__builtin_expect(NULL==df32,0)) {MALLOC_FAILED}
     }
     // Addressable copies of the template parameters for the Fortran interface.
     int32_t nlen = len;
     int32_t nlagh = lagh;
     autcorf_(&data[0],&nlen,&acov[0],&acor[0],&nlagh,&xmean);
     canarmf_(&nlen,&nlagh,&acov[0],&xarcoef[0],&nlagh,&xv[0],&xaic[0],&xoaic[0],
              &xmo,&xparcor[0],&xnc,&xm1[0],&xm2[0],&xw[0],&xz[0],&xRs[0],
              &xchi[0],&xndt[0],&xdic[0],&xdicm[0],&xpo[0],&xk,&xb[0],&xl,
              &xa[0],&nlagh,&nlagh);
     fptr = fopen(fname,"a+");
     if(NULL==fptr) {
        printf("File open error: %s\n",fname);
        std::exit(EXIT_FAILURE);
     }
     fprintf(fptr,"HW Metric name: %s\n",metric_name);
     fprintf(fptr,"mo=%d, oaic=%.16f, nc=%d, k=%d, l=%d\n",xmo,xoaic,xnc,xk,xl);
     fprintf(fptr, "arcoef, v, aic, parcor, dicm, b, a, m1, m2, po\n");
     for(int32_t i = 0; i != lagh; ++i) {fprintf(fptr,"%.16f %.16f %.16f %.16f %.16f %.16f %.16f %d %d %d\n",
          xarcoef[i],xv[i],xaic[i],xparcor[i],xdicm[i],xb[i],xa[i],xm1[i],xm2[i],xpo[i]);}
     fprintf(fptr,"w\n");
     for(std::size_t i = 0; i != lag3len; ++i) {fprintf(fptr,"%.16f\n",xw[i]);}
     fprintf(fptr, "z, Rs, chi, ndt, dic\n");
     for(std::size_t i = 0; i != lag2len; ++i) {fprintf(fptr, " %.16f %.16f %.16f %d %.16f\n",
          xz[i],xRs[i],xchi[i],xndt[i],xdic[i]);}
     fprintf(fptr, "End of CANARMF results dump\n");
     // Sort the sample array in ascending order
     //std::sort(data,data+len);
     cvrt_double_float_avx512_ptr1(&data[0],&df32[0],len);
     printf("Calling Shapiro-Wilk normality test subroutine!!\n");
     std::sort(df32,df32+len);
     swilk(init,&df32[0],len,len,len2,&a[0],w,pw,ifault);
     if(ifault!=0) printf("swilk ifault value is: %d\n",ifault);
     fprintf(fptr,"Normality Test [Shapiro-Wilk] results: w=%.9f,pw=%.9f\n",w,pw);
     if(pw<w_limit) fprintf(fptr,"Warning!! -- 'pw' is less than normality limit -- Data is not normally distributed!!\n");
     if(pw>w_limit) {
        fprintf(fptr,"Descriptive Statistics calculations!!\n");
        fprintf(fptr,"====================================================\n");
        srsd = relsd(&df32[0],len);
        fprintf(fptr,"Sample Relative Standard Deviation: %.9f\n",srsd);
        svar = var(&df32[0],len);
        fprintf(fptr,"Sample Variance: %.9f\n",svar);
        skewness_kurtosis(&df32[0],0,len-1,&skew,&kurt,0);
        fprintf(fptr,"Skewness: %.9f, Kurtosis: %.9f\n",skew,kurt);
        autocor = autoco(&df32[0],len);
        fprintf(fptr,"Autocorrelation: %.9f\n",autocor);
        loc(&df32[0],len,&sxmid,&sxmean,&sxmidm,&sxmed);
        fprintf(fptr,"Central Tendency: mid=%.9f, mean=%.9f, midm=%.9f, med=%.9f\n",
                sxmid,sxmean,sxmidm,sxmed);
        smin = sample_min(&df32[0],len);
        fprintf(fptr,"Sample Min: %.9f\n",smin);
        smax = sample_max(&df32[0],len);
        fprintf(fptr,"Sample Max: %.9f\n",smax);
        scale(&df32[0],len,sxrange,sxsd,sxrelsd,sxvar);
        fprintf(fptr,"Scale Estimations: range=%.9f, sd=%.9f, relsd=%.9f, var=%.9f\n",
                sxrange,sxsd,sxrelsd,sxvar);
     }
     fclose(fptr);
     _mm_free(df32);
     _mm_free(a);
     _mm_free(xdic);
     _mm_free(xndt);
     _mm_free(xchi);
     _mm_free(xRs);
     _mm_free(xz);
     _mm_free(xw);
}

/*
    Apply Time-Series analysis (Timsac) subroutine "MULCOR".
    The data itself is invariant from the point of view of the
    specific subroutine, i.e. "MULCOR".
    No descriptive statistics computations for this function.
*/
#include <string>
template<int32_t ndim, int32_t ldim, int32_t lagh>
__attribute__((hot))
__attribute__((aligned(32)))
void hw_perf_metrics_mulcor(const double * __restrict __attribute__((aligned(64))) mvdata, //multivariable data
                            const char * __restrict fname,
                            const std::string * __restrict metrics) {

     static_assert(ndim <= 11, "Number of dimensions can not exceed 11!!");
     static_assert(ldim <= 100000, "Number of elements per dimension can not exceed 100000!!");
     //const int32_t lagh = (int32_t)(2.0f*std::sqrt((float)ldim));
     const int32_t totlen = ndim*ldim;
     const std::size_t mvd_len = (std::size_t)(lagh*ndim*ndim);
     __attribute__((aligned(64))) double xmean[ndim+6];
     double * __restrict xcov = NULL;
     double * __restrict xcor = NULL;
     FILE * fp = NULL;
     xcov = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64));
     if(__builtin_expect(NULL==xcov,0)) {MALLOC_FAILED}
     xcor = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64));
     if(__builtin_expect(NULL==xcor,0)) {MALLOC_FAILED}
     // Addressable copies of the template parameters for the Fortran interface.
     int32_t ntot  = totlen;
     int32_t nndim = ndim;
     int32_t nlagh = lagh;
     // Call TIMSAC MULCORF subroutine
     mulcorf_(&mvdata[0],&ntot,&nndim,&nlagh,&xmean[0],&xcov[0],&xcor[0]);
     fp = fopen(fname,"a+");
     if(NULL==fp) {
        printf("File open error: %s\n",fname);
        std::exit(EXIT_FAILURE);
     }
     for(int32_t i = 0; i != ndim; ++i) {
         fprintf(fp,"HW Metrics: %s\n",metrics[i].c_str());
     }
     fprintf(fp," HW Metrics multivariate mean\n");
     for(int32_t i = 0; i != ndim; ++i) { fprintf(fp,"%.16f\n",xmean[i]);}
     fprintf(fp," HW Metrics Multivariate Correlation and Covariance\n");
     for(std::size_t i = 0; i != mvd_len; ++i) {fprintf(fp,"%.16f %.16f\n",xcor[i],xcov[i]);}
     fclose(fp);
     _mm_free(xcor);
     _mm_free(xcov);
}

/*
    Apply Time-Series analysis (Timsac) subroutine "MULSPE".
    The data itself is invariant from the point of view of the
    specific subroutine, i.e. "MULSPE".
    No descriptive statistics computations for this function.
*/
template<int32_t ndim, int32_t ldim, int32_t lagh>
__attribute__((hot))
__attribute__((aligned(32)))
void hw_perf_metrics_mulspe(const double * __restrict __attribute__((aligned(64))) mvdata, // Multidimensional data
                            const char * __restrict fname,
                            const std::string * __restrict metrics) {

     static_assert(ndim <= 11, "Number of dimensions can not exceed 11!!");
     static_assert(ldim <= 100000, "Number of elements per dimension can not exceed 100000!!");
     //const int32_t lagh = (int32_t)(2.0f*std::sqrt((float)ldim));
     const std::size_t mvd_len = (std::size_t)(lagh*ndim*ndim);
     const int32_t totlen = ndim*ldim;
     __attribute__((aligned(64))) double xmean[ndim+6];
     __attribute__((aligned(64))) double xstat[ndim];
     // MULCOR data
     double * __restrict xcov = NULL;
     double * __restrict xcor = NULL;
     // MULSPE data
     double * __restrict xspec1 = NULL;
     double * __restrict xspec2 = NULL;
     double * __restrict xcoh1 = NULL;
     double * __restrict xcoh2 = NULL;
     FILE * fp = NULL;
     if(__builtin_expect(mvd_len > 11000ULL,1)) {
#pragma omp parallel sections
        {
            #pragma omp section
            {
                xcov   = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64));
            }
            #pragma omp section
            {
                xcor   = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64));
            }
            #pragma omp section
            {
                xspec1 = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64));
            }
            #pragma omp section
            {
                xspec2 = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64));
            }
            #pragma omp section
            {
                xcoh1  = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64));
            }
            #pragma omp section
            {
                xcoh2  = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64));
            }
        }
        //Single thread (main) checks for null pointers.
        const bool isnull = (NULL==xcov)   || (NULL==xcor)  || (NULL==xspec1) ||
                            (NULL==xspec2) || (NULL==xcoh1) || (NULL==xcoh2);
        if(__builtin_expect(isnull,0)) {MALLOC_FAILED}
     }
     else {
        xcov   = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64));
        if(__builtin_expect(NULL==xcov,0))   {MALLOC_FAILED}
        xcor   = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64));
        if(__builtin_expect(NULL==xcor,0))   {MALLOC_FAILED}
        xspec1 = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64));
        if(__builtin_expect(NULL==xspec1,0)) {MALLOC_FAILED}
        xspec2 = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64));
        if(__builtin_expect(NULL==xspec2,0)) {MALLOC_FAILED}
        xcoh1  = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64));
        if(__builtin_expect(NULL==xcoh1,0))  {MALLOC_FAILED}
        xcoh2  = reinterpret_cast<double*>(_mm_malloc(mvd_len*sizeof(double),64));
        if(__builtin_expect(NULL==xcoh2,0))  {MALLOC_FAILED}
     }
     // Addressable copies of the template parameters for the Fortran interface.
     int32_t ntot  = totlen;
     int32_t nndim = ndim;
     int32_t nlagh = lagh;
     // Call MULCORF subroutine
     mulcorf_(&mvdata[0],&ntot,&nndim,&nlagh,&xmean[0],&xcov[0],&xcor[0]);
     // Call MULSPE subroutine
     mulspef_(&ntot,&nndim,&nlagh,&nlagh,&xcov[0],&xspec1[0],&xspec2[0],
              &xstat[0],&xcoh1[0],&xcoh2[0]);
     fp = fopen(fname,"a+");
     if(NULL==fp) {
        printf("File open error: %s\n",fname);
        std::exit(EXIT_FAILURE);
     }
     for(int32_t i = 0; i != ndim; ++i) {
         fprintf(fp,"HW Metrics: %s\n",metrics[i].c_str());
     }
     fprintf(fp, "Spectrum real part, imaginary part\n");
     for(int32_t i = 0; i != (int32_t)(mvd_len); ++i) { fprintf(fp,"%.16f : %.16f\n",xspec1[i],xspec2[i]);}
     fprintf(fp, "Test Statistics\n");
     for(int32_t i = 0; i != ndim; ++i) { fprintf(fp, "%.16f\n", xstat[i]);}
     fprintf(fp, "Simple coherence1, coherence2 \n");
     for(int32_t i = 0; i != (int32_t)(mvd_len); ++i) {fprintf(fp,"%.16f , %.16f\n",xcoh1[i],xcoh2[i]);}
     fclose(fp);
     _mm_free(xcoh2);
     _mm_free(xcoh1);
     _mm_free(xspec2);
     _mm_free(xspec1);
     _mm_free(xcor);
     _mm_free(xcov);
}

/*
    Apply Time-Series analysis (Timsac) subroutine "UNIMAR".
    The data itself is invariant from the point of view of the
    specific subroutine, i.e. "UNIMAR".
    Attempt to calculate the descriptive statistics if the result
    of the Shapiro-Wilk normality test allows it.
*/
template<int32_t len, int32_t lagh>
__attribute__((hot))
__attribute__((aligned(32)))
void hw_perf_metrics_unimar(const double * __restrict __attribute__((aligned(64))) data,
                            const char * __restrict fname,
                            const char * __restrict metric_name) {

     static_assert(len <= 1000000, "Input data can not exceed: 1000000 elements!!");
     const int32_t len2 = len/2; // shapiro-wilk 'a' array length.
     constexpr float w_limit = 0.05f;
     __attribute__((aligned(64))) double xv[lagh+1];
     __attribute__((aligned(64))) double xaic[lagh+1];
     __attribute__((aligned(64))) double xdaic[lagh+1];
     __attribute__((aligned(64))) double xa[lagh];
     double xmean = 0.0;
     double xvar = 0.0;
     double xaicm = 0.0;
     double xvm = 0.0;
     int32_t xm = 0;
     char pad[4];
     FILE * fp = NULL;
     DESCRIPTIVE_STATISTICS_DATA
     const bool init = false; // swilk init argument.
     a = reinterpret_cast<float*>(_mm_malloc((std::size_t)len2*sizeof(float),64));
     if(__builtin_expect(NULL==a,0)) {MALLOC_FAILED}
     df32 = reinterpret_cast<float*>(_mm_malloc((std::size_t)len*sizeof(float),64));
     if(__builtin_expect(NULL==df32,0)) {MALLOC_FAILED}
     // Addressable copies of the template parameters for the Fortran interface.
     int32_t nlen = len;
     int32_t nlagh = lagh;
     unimarf_(&data[0],&nlen,&nlagh,&xmean,&xvar,&xv[0],&xaic[0],&xdaic[0],
              &xm,&xaicm,&xvm,&xa[0]);
     fp = fopen(fname,"a+");
     if(NULL==fp) {
        printf("File open error: %s\n",fname);
        std::exit(EXIT_FAILURE);
     }
     fprintf(fp,"HW Metric: %s, Method: Univariate Autoregressive AR Model Fitting\n",metric_name);
     fprintf(fp,"\nmean=%.16f,var=%.16f,aicm=%.16f,vm=%.16f,xm=%d\n", xmean,
             xvar,xaicm,xvm,xm);
     fprintf(fp," V, AIC, DAIC\n");
     for(int32_t i = 0; i != lagh+1; ++i) {fprintf(fp," %.16f %.16f %.16f\n",xv[i],xaic[i],xdaic[i]);}
     fprintf(fp, "A\n");
     for(int32_t i = 0; i != lagh; ++i) {fprintf(fp," %.16f\n",xa[i]);}
     cvrt_double_float_avx512_ptr1(&data[0],&df32[0],len);
     std::sort(df32,df32+len);
     swilk(init,&df32[0],len,len,len2,&a[0],w,pw,ifault);
     if(ifault!=0) printf("swilk ifault value is: %d\n",ifault);
     fprintf(fp,"HW Metric: %s -- Normality Test [Shapiro-Wilk] results: w=%.9f,pw=%.9f\n",metric_name,w,pw);
     if(pw<w_limit) fprintf(fp,"Warning!! -- 'pw' is less than normality limit -- Data is not normally distributed!!\n");
     if(pw>w_limit) {
        fprintf(fp,"Descriptive Statistics calculations!!\n");
        fprintf(fp,"====================================================\n");
        srsd = relsd(&df32[0],len);
        fprintf(fp,"Sample Relative Standard Deviation: %.9f\n",srsd);
        svar = var(&df32[0],len);
        fprintf(fp,"Sample Variance: %.9f\n",svar);
        skewness_kurtosis(&df32[0],0,len-1,&skew,&kurt,0);
        fprintf(fp,"Skewness: %.9f, Kurtosis: %.9f\n",skew,kurt);
        autocor = autoco(&df32[0],len);
        fprintf(fp,"Autocorrelation: %.9f\n",autocor);
        loc(&df32[0],len,&sxmid,&sxmean,&sxmidm,&sxmed);
        fprintf(fp,"Central Tendency: mid=%.9f, mean=%.9f, midm=%.9f, med=%.9f\n",
                sxmid,sxmean,sxmidm,sxmed);
        smin = sample_min(&df32[0],len);
        fprintf(fp,"Sample Min: %.9f\n",smin);
        smax = sample_max(&df32[0],len);
        fprintf(fp,"Sample Max: %.9f\n",smax);
        scale(&df32[0],len,sxrange,sxsd,sxrelsd,sxvar);
        fprintf(fp,"Scale Estimations: range=%.9f, sd=%.9f, relsd=%.9f, var=%.9f\n",
                sxrange,sxsd,sxrelsd,sxvar);
     }
     fclose(fp);
     _mm_free(df32);
     _mm_free(a);
}

/*
    Apply Time-Series analysis (Timsac) subroutine "UNIBAR".
    The data itself is invariant from the point of view of the
    specific subroutine, i.e. "UNIBAR".
*/
template<int32_t len,int32_t lagh>
__attribute__((hot))
__attribute__((aligned(32)))
void hw_perf_metrics_unibar(const double * __restrict __attribute__((aligned(64))) data,
                            const char * __restrict fname,
                            const char * __restrict metric_name) {

     static_assert(len <= 1000000, "Input data can not exceed: 1000000 elements!!");
     const int32_t len2 = len/2; // shapiro-wilk 'a' array length.
     constexpr float w_limit = 0.05f;
     __attribute__((aligned(64))) double xv[lagh+1];
     __attribute__((aligned(64))) double xaic[lagh+1];
     __attribute__((aligned(64))) double xdaic[lagh+1];
     __attribute__((aligned(64))) double xpa[lagh];
     __attribute__((aligned(64))) double xbw[lagh+1];
     __attribute__((aligned(64))) double xsbw[lagh];
     __attribute__((aligned(64))) double xpab[lagh];
     __attribute__((aligned(64))) double xa[lagh];
     __attribute__((aligned(64))) double xpxx[128];
     double xmean = 0.0;
     double xvar = 0.0;
     double xaicm = 0.0;
     double xvm = 0.0;
     double xaicb = 0.0;
     double xvb = 0.0;
     double xpn = 0.0;
     int32_t xm = 0;
     char pad[4];
     FILE * fp = NULL;
     DESCRIPTIVE_STATISTICS_DATA
     const bool init = false; // swilk init argument.
     a = reinterpret_cast<float*>(_mm_malloc((std::size_t)len2*sizeof(float),64));
     if(__builtin_expect(NULL==a,0)) {MALLOC_FAILED}
     df32 = reinterpret_cast<float*>(_mm_malloc((std::size_t)len*sizeof(float),64));
     if(__builtin_expect(NULL==df32,0)) {MALLOC_FAILED}
     // Addressable copies of the template parameters for the Fortran interface.
     int32_t nlen = len;
     int32_t nlagh = lagh;
     unibarf_(&data[0],&nlen,&nlagh,&xmean,&xvar,&xv[0],&xaic[0],&xdaic[0],
              &xm,&xaicm,&xvm,&xpa[0],&xbw[0],&xsbw[0],&xpab[0],&xaicb,
              &xvb,&xpn,&xa[0],&xpxx[0]);
     fp = fopen(fname,"a+");
     if(NULL==fp) {
        printf("File open error: %s\n",fname);
        std::exit(EXIT_FAILURE);
     }
     fprintf(fp," Metric: %s, Method: Univariate Bayesian Method of AR Model Fitting\n",metric_name);
     fprintf(fp,"\nxmean=%.16f,xvar=%.16f,xaicm=%.16f,xvm=%.16f,xaicb=%.16f,xvb=%.16f,xpn=%.16f,xm=%d\n",xmean,
             xvar,xaicm,xvm,xaicb,xvb,xpn,xm);
     fprintf(fp," V, AIC, DAIC, BW\n");
     for(int32_t i = 0; i != (lagh+1); ++i) {fprintf(fp," %.16f %.16f %.16f %.16f\n",xv[i],xaic[i],xdaic[i],xbw[i]);}
     fprintf(fp, " PA, SBW, PAB, A\n");
     for(int32_t i = 0; i != lagh; ++i) {fprintf(fp," %.16f %.16f %.16f %.16f\n", xpa[i],xsbw[i],xpab[i],xa[i]);}
     fprintf(fp, " PXX\n");
     for(int32_t i = 0; i != 128; ++i) {fprintf(fp, "%.16f\n",xpxx[i]);}
     cvrt_double_float_avx512_ptr1(&data[0],&df32[0],len);
     std::sort(df32,df32+len);
     swilk(init,&df32[0],len,len,len2,&a[0],w,pw,ifault);
     if(ifault!=0) printf("swilk ifault value is: %d\n",ifault);
     fprintf(fp,"HW Metric: %s -- Normality Test [Shapiro-Wilk] results: w=%.9f,pw=%.9f\n",metric_name,w,pw);
     if(pw<w_limit) fprintf(fp,"Warning!! -- 'pw' is less than normality limit -- Data is not normally distributed!!\n");
     if(pw>w_limit) {
        fprintf(fp,"Descriptive Statistics calculations!!\n");
        fprintf(fp,"====================================================\n");
        srsd = relsd(&df32[0],len);
        fprintf(fp,"Sample Relative Standard Deviation: %.9f\n",srsd);
        svar = var(&df32[0],len);
        fprintf(fp,"Sample Variance: %.9f\n",svar);
        skewness_kurtosis(&df32[0],0,len-1,&skew,&kurt,0);
        fprintf(fp,"Skewness: %.9f, Kurtosis: %.9f\n",skew,kurt);
        autocor = autoco(&df32[0],len);
        fprintf(fp,"Autocorrelation: %.9f\n",autocor);
        loc(&df32[0],len,&sxmid,&sxmean,&sxmidm,&sxmed);
        fprintf(fp,"Central Tendency: mid=%.9f, mean=%.9f, midm=%.9f, med=%.9f\n",
                sxmid,sxmean,sxmidm,sxmed);
        smin = sample_min(&df32[0],len);
        fprintf(fp,"Sample Min: %.9f\n",smin);
        smax = sample_max(&df32[0],len);
        fprintf(fp,"Sample Max: %.9f\n",smax);
        scale(&df32[0],len,sxrange,sxsd,sxrelsd,sxvar);
        fprintf(fp,"Scale Estimations: range=%.9f, sd=%.9f, relsd=%.9f, var=%.9f\n",
                sxrange,sxsd,sxrelsd,sxvar);
     }
     fclose(fp);
     _mm_free(df32);
     _mm_free(a);
}

/*
    Apply Time-Series analysis (Timsac) subroutine "EXSAR".
    The data itself is invariant from the point of view of the
    specific subroutine, i.e. "EXSAR".
*/
template<int32_t len,int32_t lagh>
__attribute__((hot))
__attribute__((aligned(32)))
void hw_perf_metrics_exsar( const double * __restrict __attribute__((aligned(64))) data,
                            const char * __restrict fname,
                            const char * __restrict metric_name) {

     static_assert(len <= 1000000, "Input data can not exceed: 1000000 elements!!");
     const int32_t len2 = len/2; // shapiro-wilk 'a' array length.
     constexpr float w_limit = 0.05f;
     __attribute__((aligned(64))) double xv[lagh+1];
     __attribute__((aligned(64))) double xaic[lagh+1];
     __attribute__((aligned(64))) double xdaic[lagh+1];
     __attribute__((aligned(64))) double xa1[lagh];
     __attribute__((aligned(64))) double xa2[lagh];
     double xmean = 0.0;
     double xvar = 0.0;
     double xaicm = 0.0;
     double xsdm1 = 0.0;
     double xsdm2 = 0.0;
     char pad1[4];
     int32_t xier = 0;
     int32_t xm = 0;
     char pad2[4];
     FILE * fp = NULL;
     DESCRIPTIVE_STATISTICS_DATA
     const bool init = false; // swilk init argument.
     a = reinterpret_cast<float*>(_mm_malloc((std::size_t)len2*sizeof(float),64));
     if(__builtin_expect(NULL==a,0)) {MALLOC_FAILED}
     df32 = reinterpret_cast<float*>(_mm_malloc((std::size_t)len*sizeof(float),64));
     if(__builtin_expect(NULL==df32,0)) {MALLOC_FAILED}
     // Addressable copies of the template parameters for the Fortran interface.
     int32_t nlen = len;
     int32_t nlagh = lagh;
     exsarf_(&data[0],&nlen,&nlagh,&xmean,&xvar,&xv[0],&xaic[0],&xdaic[0],
             &xm,&xaicm,&xsdm1,&xa1[0],&xsdm2,&xa2[0],&xier);
     fp = fopen(fname,"a+");
     if(NULL==fp) {
        printf("File open error: %s\n",fname);
        std::exit(EXIT_FAILURE);
     }
     fprintf(fp,"HW Metric: %s, Maximum Likelihood Estimation\n", metric_name);
     fprintf(fp,"xmean=%.16f,xvar=%.16f,xaicm=%.16f,xsdm1=%.16f,xsdm2=%.16f,xier=%d,xm=%d\n",
             xmean,xvar,xaicm,xsdm1,xsdm2,xier,xm);
     fprintf(fp,"V, AIC, DAIC \n");
     for(int32_t i = 0; i != lagh+1; ++i) {fprintf(fp," %.16f %.16f %.16f\n", xv[i],xaic[i],xdaic[i]);}
     fprintf(fp," A1, A2 \n");
     for(int32_t i = 0; i != lagh; ++i) {fprintf(fp, " %.16f %.16f\n", xa1[i],xa2[i]);}
     cvrt_double_float_avx512_ptr1(&data[0],&df32[0],len);
     std::sort(df32,df32+len);
     swilk(init,&df32[0],len,len,len2,&a[0],w,pw,ifault);
     if(ifault!=0) printf("swilk ifault value is: %d\n",ifault);
     fprintf(fp,"HW Metric: %s -- Normality Test [Shapiro-Wilk] results: w=%.9f,pw=%.9f\n",metric_name,w,pw);
     if(pw<w_limit) fprintf(fp,"Warning!! -- 'pw' is less than normality limit -- Data is not normally distributed!!\n");
     if(pw>w_limit) {
        fprintf(fp,"Descriptive Statistics calculations!!\n");
        fprintf(fp,"====================================================\n");
        srsd = relsd(&df32[0],len);
        fprintf(fp,"Sample Relative Standard Deviation: %.9f\n",srsd);
        svar = var(&df32[0],len);
        fprintf(fp,"Sample Variance: %.9f\n",svar);
        skewness_kurtosis(&df32[0],0,len-1,&skew,&kurt,0);
        fprintf(fp,"Skewness: %.9f, Kurtosis: %.9f\n",skew,kurt);
        autocor = autoco(&df32[0],len);
        fprintf(fp,"Autocorrelation: %.9f\n",autocor);
        loc(&df32[0],len,&sxmid,&sxmean,&sxmidm,&sxmed);
        fprintf(fp,"Central Tendency: mid=%.9f, mean=%.9f, midm=%.9f, med=%.9f\n",
                sxmid,sxmean,sxmidm,sxmed);
        smin = sample_min(&df32[0],len);
        fprintf(fp,"Sample Min: %.9f\n",smin);
        smax = sample_max(&df32[0],len);
        fprintf(fp,"Sample Max: %.9f\n",smax);
        scale(&df32[0],len,sxrange,sxsd,sxrelsd,sxvar);
        fprintf(fp,"Scale Estimations: range=%.9f, sd=%.9f, relsd=%.9f, var=%.9f\n",
                sxrange,sxsd,sxrelsd,sxvar);
     }
     fclose(fp);
     _mm_free(df32);
     _mm_free(a);
}

/*
    Apply Time-Series analysis (Timsac) subroutine "BISPEC".
    The data itself is invariant from the point of view of the
    specific subroutine, i.e. "BISPEC".
*/
template<int32_t len,int32_t lagh>
__attribute__((hot))
__attribute__((aligned(32)))
void hw_perf_metrics_bispec(const double * __restrict __attribute__((aligned(64))) data,
                            const char * __restrict fname,
                            const char * __restrict metric_name) {

     static_assert(len <= 1000000, "Input data can not exceed: 1000000 elements!!");
     const int32_t lg12x = lagh*lagh+7;
     const std::size_t lagh_len = static_cast<std::size_t>(lg12x);
     const int32_t len2 = len/2; // shapiro-wilk 'a' array length.
     constexpr float w_limit = 0.05f;
     const bool init = false; // swilk init argument.
     __attribute__((aligned(64))) double acor[lagh+7];
     __attribute__((aligned(64))) double acov[lagh+7];
     __attribute__((aligned(64))) double pspec1[lagh+7];
     __attribute__((aligned(64))) double pspec2[lagh+7];
     __attribute__((aligned(64))) double sig[lagh+7];
     double * __restrict mnt = NULL;
     double * __restrict ch = NULL;
     double * __restrict br = NULL;
     double * __restrict bi = NULL;
     FILE * fp = NULL;
     double xmean = 0.0;
     double xrat = 0.0; // BISPECF result
     DESCRIPTIVE_STATISTICS_DATA
     if(__builtin_expect(lg12x >= 100000,1)) {
#pragma omp parallel sections
        {
            #pragma omp section
            {
                mnt  = reinterpret_cast<double*>(_mm_malloc(lagh_len*sizeof(double),64));
            }
            #pragma omp section
            {
                ch   = reinterpret_cast<double*>(_mm_malloc(lagh_len*sizeof(double),64));
            }
            #pragma omp section
            {
                br   = reinterpret_cast<double*>(_mm_malloc(lagh_len*sizeof(double),64));
            }
            #pragma omp section
            {
                bi   = reinterpret_cast<double*>(_mm_malloc(lagh_len*sizeof(double),64));
            }
            #pragma omp section
            {
                a    = reinterpret_cast<float*>(_mm_malloc((std::size_t)len2*sizeof(float),64));
            }
            #pragma omp section
            {
                df32 = reinterpret_cast<float*>(_mm_malloc((std::size_t)len*sizeof(float),64));
            }
        }
        const bool isnull = (NULL==mnt) || (NULL==ch) || (NULL==br) ||
                            (NULL==bi)  || (NULL==a)  || (NULL==df32);
        if(__builtin_expect(isnull,0)) {MALLOC_FAILED}
     }
     else {
        mnt = reinterpret_cast<double*>(_mm_malloc(lagh_len*sizeof(double),64));
        if(__builtin_expect(NULL==mnt,0)) {MALLOC_FAILED}
        ch  = reinterpret_cast<double*>(_mm_malloc(lagh_len*sizeof(double),64));
        if(__builtin_expect(NULL==ch,0)) {MALLOC_FAILED}
        br  = reinterpret_cast<double*>(_mm_malloc(lagh_len*sizeof(double),64));
        if(__builtin_expect(NULL==br,0)) {MALLOC_FAILED}
        bi  = reinterpret_cast<double*>(_mm_malloc(lagh_len*sizeof(double),64));
        if(__builtin_expect(NULL==bi,0)) {MALLOC_FAILED}
        a   = reinterpret_cast<float*>(_mm_malloc((std::size_t)len2*sizeof(float),64));
        if(__builtin_expect(NULL==a,0)) {MALLOC_FAILED}
        df32 = reinterpret_cast<float*>(_mm_malloc((std::size_t)len*sizeof(float),64));
        if(__builtin_expect(NULL==df32,0)) {MALLOC_FAILED}
     }
     // Addressable copies of the template parameters for the Fortran interface.
     int32_t nlen = len;
     int32_t nlagh = lagh;
     thirmof_(&nlen,&nlagh,&data[0],&xmean,&acov[0],&acor[0],&mnt[0]);
     bispecf_(&nlen,&nlagh,&data[0],&mnt[0],&pspec1[0],&pspec2[0],
              &sig[0],&br[0],&bi[0],&xrat);
     fp = fopen(fname,"a+");
     if(NULL==fp) {
        printf("File open error: %s\n",fname);
        std::exit(EXIT_FAILURE);
     }
     fprintf(fp,"HW Metric: %s, Bi-Spectrum Decomposition\n",metric_name);
     fprintf(fp,"xrat=%.16f\n",xrat);
     fprintf(fp," %s -- Smoothed Power Spectrum-1, Power Spectrum-2 and Significance\n", metric_name);
     for(int32_t i = 0; i != lagh; ++i) { fprintf(fp, "%.16f %.16f %.16f\n", pspec1[i],pspec2[i],sig[i]);}
     fprintf(fp, " %s -- Coherence, Real part, Imaginary part\n", metric_name);
     // NOTE: ch is not written by bispecf_ above; its dump reflects whatever the allocation holds.
     for(int32_t i = 0; i != lg12x; ++i) { fprintf(fp, "%.16f %.16f %.16f\n",ch[i],br[i],bi[i]);}
     cvrt_double_float_avx512_ptr1(&data[0],&df32[0],len);
     std::sort(df32,df32+len);
     swilk(init,&df32[0],len,len,len2,&a[0],w,pw,ifault);
     if(ifault!=0) printf("swilk ifault value is: %d\n",ifault);
     fprintf(fp,"HW Metric: %s -- Normality Test [Shapiro-Wilk] results: w=%.9f,pw=%.9f\n",metric_name,w,pw);
     if(pw<w_limit) fprintf(fp,"Warning!! -- 'pw' is less than normality limit -- Data is not normally distributed!!\n");
     if(pw>w_limit) {
        fprintf(fp,"Descriptive Statistics calculations!!\n");
        fprintf(fp,"====================================================\n");
        srsd = relsd(&df32[0],len);
        fprintf(fp,"Sample Relative Standard Deviation: %.9f\n",srsd);
        svar = var(&df32[0],len);
        fprintf(fp,"Sample Variance: %.9f\n",svar);
        skewness_kurtosis(&df32[0],0,len-1,&skew,&kurt,0);
        fprintf(fp,"Skewness: %.9f, Kurtosis: %.9f\n",skew,kurt);
        autocor = autoco(&df32[0],len);
        fprintf(fp,"Autocorrelation: %.9f\n",autocor);
        loc(&df32[0],len,&sxmid,&sxmean,&sxmidm,&sxmed);
        fprintf(fp,"Central Tendency: mid=%.9f, mean=%.9f, midm=%.9f, med=%.9f\n",
                sxmid,sxmean,sxmidm,sxmed);
        smin = sample_min(&df32[0],len);
        fprintf(fp,"Sample Min: %.9f\n",smin);
        smax = sample_max(&df32[0],len);
        fprintf(fp,"Sample Max: %.9f\n",smax);
        scale(&df32[0],len,sxrange,sxsd,sxrelsd,sxvar);
        fprintf(fp,"Scale Estimations: range=%.9f, sd=%.9f, relsd=%.9f, var=%.9f\n",
                sxrange,sxsd,sxrelsd,sxvar);
     }
     fclose(fp);
     _mm_free(bi);
     _mm_free(br);
     _mm_free(ch);
     _mm_free(mnt);
     _mm_free(df32);
     _mm_free(a);
}

/*
    Apply Time-Series analysis (Timsac) subroutine "THIRMO".
    The data itself is invariant from the point of view of the
    specific subroutine, i.e. "THIRMO".
*/
template<int32_t len,int32_t lagh>
__attribute__((hot))
__attribute__((aligned(32)))
void hw_perf_metrics_thirmo(const double * __restrict __attribute__((aligned(64))) data,
                            const char * __restrict fname,
                            const char * __restrict metric_name) {

     static_assert(len <= 1000000, "Input data can not exceed: 1000000 elements!!");
     const int32_t lg12x = lagh*lagh+7;
     const std::size_t lagh_len = static_cast<std::size_t>(lg12x);
     const int32_t len2 = len/2; // shapiro-wilk 'a' array length.
     constexpr float w_limit = 0.05f;
     const bool init = false; // swilk init argument.
     __attribute__((aligned(64))) double acor[lagh+7];
     __attribute__((aligned(64))) double acov[lagh+7];
     double * __restrict mnt = NULL;
     FILE * fp = NULL;
     double xmean = 0.0;
     DESCRIPTIVE_STATISTICS_DATA
     mnt = reinterpret_cast<double*>(_mm_malloc(lagh_len*sizeof(double),64));
     if(__builtin_expect(NULL==mnt,0)) {MALLOC_FAILED}
     a = reinterpret_cast<float*>(_mm_malloc((std::size_t)len2*sizeof(float),64));
     if(__builtin_expect(NULL==a,0)) {MALLOC_FAILED}
     df32 = reinterpret_cast<float*>(_mm_malloc((std::size_t)len*sizeof(float),64));
     if(__builtin_expect(NULL==df32,0)) {MALLOC_FAILED}
     // Addressable copies of the template parameters for the Fortran interface.
     int32_t nlen = len;
     int32_t nlagh = lagh;
     thirmof_(&nlen,&nlagh,&data[0],&xmean,&acov[0],&acor[0],&mnt[0]);
     fp = fopen(fname,"a+");
     if(NULL==fp) {
        printf("File open error: %s\n",fname);
        std::exit(EXIT_FAILURE);
     }
     fprintf(fp,"HW Metric: %s Third Moments\n",metric_name);
     fprintf(fp,"xmean=%.16f\n",xmean);
     fprintf(fp,"ACOV, ACOR\n");
     for(int32_t i = 0; i != lagh; ++i) { fprintf(fp, "%.16f %.16f\n", acov[i],acor[i]);}
     fprintf(fp," %s -- Third Moment\n",metric_name);
     for(int32_t i = 0; i != lg12x; ++i) { fprintf(fp, "%.16f\n",mnt[i]);}
     cvrt_double_float_avx512_ptr1(&data[0],&df32[0],len);
     std::sort(df32,df32+len);
     swilk(init,&df32[0],len,len,len2,&a[0],w,pw,ifault);
     if(ifault!=0) printf("swilk ifault value is: %d\n",ifault);
     fprintf(fp,"HW Metric: %s -- Normality Test [Shapiro-Wilk] results: w=%.9f,pw=%.9f\n",metric_name,w,pw);
     if(pw<w_limit) fprintf(fp,"Warning!! -- 'pw' is less than normality limit -- Data is not normally distributed!!\n");
     if(pw>w_limit) {
        fprintf(fp,"Descriptive Statistics calculations!!\n");
        fprintf(fp,"====================================================\n");
        srsd = relsd(&df32[0],len);
        fprintf(fp,"Sample Relative Standard Deviation: %.9f\n",srsd);
        svar = var(&df32[0],len);
        fprintf(fp,"Sample Variance: %.9f\n",svar);
        skewness_kurtosis(&df32[0],0,len-1,&skew,&kurt,0);
        fprintf(fp,"Skewness: %.9f, Kurtosis: %.9f\n",skew,kurt);
        autocor = autoco(&df32[0],len);
        fprintf(fp,"Autocorrelation: %.9f\n",autocor);
        loc(&df32[0],len,&sxmid,&sxmean,&sxmidm,&sxmed);
        fprintf(fp,"Central Tendency: mid=%.9f, mean=%.9f, midm=%.9f, med=%.9f\n",
                sxmid,sxmean,sxmidm,sxmed);
        smin = sample_min(&df32[0],len);
        fprintf(fp,"Sample Min: %.9f\n",smin);
        smax = sample_max(&df32[0],len);
        fprintf(fp,"Sample Max: %.9f\n",smax);
        scale(&df32[0],len,sxrange,sxsd,sxrelsd,sxvar);
        fprintf(fp,"Scale Estimations: range=%.9f, sd=%.9f, relsd=%.9f, var=%.9f\n",
                sxrange,sxsd,sxrelsd,sxvar);
     }
     fclose(fp);
     _mm_free(df32);
     _mm_free(a);
     _mm_free(mnt);
}

/*
    Apply Time-Series analysis (Timsac) subroutine "AUTOCOR".
    The data itself is invariant from the point of view of the
    specific subroutine, i.e. "AUTOCOR".
*/
template<int32_t len,int32_t lagh>
__attribute__((hot))
__attribute__((aligned(32)))
void hw_perf_metrics_autocor(const double * __restrict __attribute__((aligned(64))) data,
                             const char * __restrict fname,
                             const char * __restrict metric_name) {

     static_assert(len <= 1000000, "Input data can not exceed: 1000000 elements!!");
     const int32_t len2 = len/2; // shapiro-wilk 'a' array length.
     constexpr float w_limit = 0.05f;
     const bool init = false; // swilk init argument.
     __attribute__((aligned(64))) double acor[lagh+8];
     __attribute__((aligned(64))) double acov[lagh+8];
     double xmean = 0.0;
     FILE * fp = NULL;
     DESCRIPTIVE_STATISTICS_DATA
     a = reinterpret_cast<float*>(_mm_malloc((std::size_t)len2*sizeof(float),64));
     if(__builtin_expect(NULL==a,0)) {MALLOC_FAILED}
     df32 = reinterpret_cast<float*>(_mm_malloc((std::size_t)len*sizeof(float),64));
     if(__builtin_expect(NULL==df32,0)) {MALLOC_FAILED}
     // Addressable copies of the template parameters for the Fortran interface.
     int32_t nlen = len;
     int32_t nlagh = lagh;
     autocorf_(&data[0],&nlen,&acov[0],&acor[0],&nlagh,&xmean);
     fp = fopen(fname,"a+");
     if(NULL==fp) {
        printf("File open error: %s\n",fname);
        std::exit(EXIT_FAILURE);
     }
     fprintf(fp,"HW Metric: %s\n",metric_name);
     fprintf(fp,"xmean=%.16f\n",xmean);
     fprintf(fp," Series Autocorrelation and Autocovariance.\n");
     for(int32_t i = 0; i != lagh; ++i) {fprintf(fp,"%.16f %.16f\n",acor[i],acov[i]);}
     cvrt_double_float_avx512_ptr1(&data[0],&df32[0],len);
     std::sort(df32,df32+len);
     swilk(init,&df32[0],len,len,len2,&a[0],w,pw,ifault);
     if(ifault!=0) printf("swilk ifault value is: %d\n",ifault);
     fprintf(fp,"HW Metric: %s -- Normality Test [Shapiro-Wilk] results: w=%.9f,pw=%.9f\n",metric_name,w,pw);
     if(pw<w_limit) fprintf(fp,"Warning!! -- 'pw' is less than normality limit -- Data is not normally distributed!!\n");
     if(pw>w_limit) {
        fprintf(fp,"Descriptive Statistics calculations!!\n");
        fprintf(fp,"====================================================\n");
        srsd = relsd(&df32[0],len);
        fprintf(fp,"Sample Relative Standard Deviation: %.9f\n",srsd);
        svar = var(&df32[0],len);
        fprintf(fp,"Sample Variance: %.9f\n",svar);
        skewness_kurtosis(&df32[0],0,len-1,&skew,&kurt,0);
        fprintf(fp,"Skewness: %.9f, Kurtosis: %.9f\n",skew,kurt);
        autocor = autoco(&df32[0],len);
        fprintf(fp,"Autocorrelation: %.9f\n",autocor);
        loc(&df32[0],len,&sxmid,&sxmean,&sxmidm,&sxmed);
        fprintf(fp,"Central Tendency: mid=%.9f, mean=%.9f, midm=%.9f, med=%.9f\n",
                sxmid,sxmean,sxmidm,sxmed);
        smin = sample_min(&df32[0],len);
        fprintf(fp,"Sample Min: %.9f\n",smin);
        smax = sample_max(&df32[0],len);
        fprintf(fp,"Sample Max: %.9f\n",smax);
        scale(&df32[0],len,sxrange,sxsd,sxrelsd,sxvar);
        fprintf(fp,"Scale Estimations: range=%.9f, sd=%.9f, relsd=%.9f, var=%.9f\n",
                sxrange,sxsd,sxrelsd,sxvar);
     }
     fclose(fp);
     _mm_free(df32);
     _mm_free(a);
}

#endif /*__GMS_HW_METRICS_TIME_SERIES_ANALYSIS_H__*/
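// ---------------------------------------------------------------------------
// Usage sketch (a separate translation unit, not part of the header above):
// driving one of the analysis templates with synthetic data. The Timsac
// bindings, swilk and the descriptive-statistics routines must be linked in
// by the including project; the length, lag and metric name are assumptions.
#include "GMS_hw_metrics_time_series_analysis.h"

int main()
{
    constexpr int32_t len  = 4096;
    constexpr int32_t lagh = 64;   // ~sqrt(len), as the commented-out hints suggest
    alignas(64) static double samples[len];
    for(int32_t i = 0; i < len; ++i)
        samples[i] = sin(0.01*i) + 0.1*(double)(i%7); // synthetic metric trace
    hw_perf_metrics_autocor<len,lagh>(samples,"metrics_dump.txt","L2_MISS_RATE");
    return 0;
}
// ---------------------------------------------------------------------------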
LAGraph_pagerank3c.c
//------------------------------------------------------------------------------ // LAGraph_pagerank3c: pagerank using a real semiring //------------------------------------------------------------------------------ /* LAGraph: graph algorithms based on GraphBLAS Copyright 2019 LAGraph Contributors. (see Contributors.txt for a full list of Contributors; see ContributionInstructions.txt for information on how you can Contribute to this project). All Rights Reserved. NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT. Released under a BSD license, please see the LICENSE file distributed with this Software or contact permission@sei.cmu.edu for full terms. Created, in part, with funding and support from the United States Government. (see Acknowledgments.txt file). This program includes and/or can make use of certain third party source code, object code, documentation and other files ("Third Party Software"). See LICENSE file for more details. */ //------------------------------------------------------------------------------ // LAGraph_pagerank3c: GAP-style PageRank, with import/export // See also LAGraph_pagerank3a, for the same computation without import/export. // This algorithm follows the specification given in the GAP Benchmark Suite: // https://arxiv.org/abs/1508.03619 which assumes that both A and A' are // already available, as are the row and column degrees. // The GAP Benchmark algorithm assumes the graph has no nodes with no out-going // edges (otherwise, a divide-by-zero occurs). In terms of the adjacency // matrix, it assumes there are no rows in A that have no entries. // For fastest results, the input matrix should stored in GxB_BY_COL format. // TODO: or use AT by row, since the GAP assumes both A and A' are available. 
#define LAGRAPH_EXPERIMENTAL_ASK_BEFORE_BENCHMARKING #include "LAGraph.h" #define LAGRAPH_FREE_WORK \ { \ LAGRAPH_FREE (I) ; \ LAGRAPH_FREE (pr) ; \ LAGRAPH_FREE (prior) ; \ GrB_free (&v) ; \ } #define LAGRAPH_FREE_ALL \ { \ LAGRAPH_FREE_WORK ; \ GrB_free (result) ; \ } GrB_Info LAGraph_pagerank3c // PageRank definition ( GrB_Vector *result, // output: array of LAGraph_PageRank structs GrB_Matrix A, // binary input graph, not modified const float *LA_RESTRICT d_out, // out degree of each node (GrB_FP32, size n) float damping, // damping factor (typically 0.85) int itermax, // maximum number of iterations int *iters // output: number of iterations taken ) { //-------------------------------------------------------------------------- // initializations //-------------------------------------------------------------------------- GrB_Info info ; GrB_Index n, ncols ; GrB_Vector v = NULL ; GrB_Index *I = NULL ; float *LA_RESTRICT pr = NULL ; float *prior = NULL ; (*result) = NULL ; LAGr_Matrix_ncols (&ncols, A) ; LAGr_Matrix_nrows (&n, A) ; if (ncols != n) { LAGRAPH_ERROR ("matrix must be square", GrB_DIMENSION_MISMATCH) ; } // Teleport value const float teleport = (1 - damping) / n ; const float tol = 1e-4 ; float rdiff = 1 ; // first iteration is always done GrB_Type type = GrB_FP32 ; int nthreads = LAGraph_get_nthreads ( ) ; nthreads = LAGRAPH_MIN (n, nthreads) ; nthreads = LAGRAPH_MAX (nthreads, 1) ; // initializing pr and I pr = LAGraph_malloc (n, sizeof (float)) ; #if GxB_IMPLEMENTATION >= GxB_VERSION (4,0,0) // do not need I #else I = LAGraph_malloc (n, sizeof (GrB_Index)) ; #endif prior = LAGraph_malloc (n, sizeof (float)) ; if (pr == NULL || I == NULL || prior == NULL) { LAGRAPH_ERROR ("out of memory", GrB_OUT_OF_MEMORY) ; } #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t k = 0 ; k < n ; k++) { #if GxB_IMPLEMENTATION >= GxB_VERSION (4,0,0) // do not need I #else I [k] = k ; #endif pr [k] = 1.0/n ; } //-------------------------------------------------------------------------- // pagerank iterations //-------------------------------------------------------------------------- for ((*iters) = 0 ; (*iters) < itermax && rdiff > tol ; (*iters)++) { // printf ("\n============================ pagerank 3C iter: %d\n", (*iters)) ; // Importance calculation #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t i = 0 ; i < n; i++) { prior [i] = pr [i] ; pr [i] = damping * pr [i] / d_out [i] ; } #if GxB_IMPLEMENTATION >= GxB_VERSION (4,0,0) LAGr_Vector_import_Full (&v, GrB_FP32, n, (void **) (&pr), NULL) ; #else // import pr and I into v LAGr_Vector_import (&v, GrB_FP32, n, n, &I, (void **) (&pr), NULL) ; #endif // Calculate total PR of all inbound vertices: v = A' * v LAGr_mxv (v, NULL, NULL, GxB_PLUS_SECOND_FP32, A, v, LAGraph_desc_tooo); GrB_Index nvals ; LAGr_Vector_nvals (&nvals, v) ; if (nvals != n) { LAGRAPH_ERROR ("Matrix must not have empty rows or columns!", GrB_PANIC) ; } #if GxB_IMPLEMENTATION >= GxB_VERSION (4,0,0) LAGr_Vector_export_Full (&v, &type, &n, (void **) (&pr), NULL) ; #else // export v to pr and I LAGr_Vector_export (&v, &type, &n, &nvals, &I, (void **) (&pr), NULL) ; #endif // add teleport and check for convergence rdiff = 0 ; #pragma omp parallel for num_threads(nthreads) schedule(static) \ reduction(+:rdiff) for (int64_t i = 0 ; i < n; i++) { pr [i] += teleport ; rdiff += fabsf (prior [i] - pr [i]) ; } } #if GxB_IMPLEMENTATION >= GxB_VERSION (4,0,0) LAGr_Vector_import_Full (result, GrB_FP32, n, (void **) (&pr), NULL) ; 
#else // import result (pr and I) into final result LAGr_Vector_import (result, GrB_FP32, n, n, &I, (void **) (&pr), NULL) ; #endif LAGRAPH_FREE_WORK ; return (GrB_SUCCESS) ; }
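//------------------------------------------------------------------------------
// Usage sketch (illustrative, not part of the original file): building the
// GrB_FP32 out-degree array with a row reduce and calling LAGraph_pagerank3c.
// Error handling is elided; A must be square with no empty rows, as the
// comments above require. The helper name rank_graph is an assumption.

#include <stdlib.h>

GrB_Info rank_graph (GrB_Vector *pr, GrB_Matrix A)
{
    GrB_Index n ;
    GrB_Matrix_nrows (&n, A) ;
    GrB_Vector deg ;
    GrB_Vector_new (&deg, GrB_FP32, n) ;
    // row degrees of a binary matrix: deg (i) = sum of row i
    GrB_Matrix_reduce_Monoid (deg, NULL, NULL, GrB_PLUS_MONOID_FP32, A, NULL) ;
    float *d_out = (float *) malloc (n * sizeof (float)) ;
    for (GrB_Index i = 0 ; i < n ; i++)
    {
        // every row is assumed nonempty, so the extract always finds a value
        GrB_Vector_extractElement_FP32 (&(d_out [i]), deg, i) ;
    }
    int iters = 0 ;
    GrB_Info info = LAGraph_pagerank3c (pr, A, d_out, 0.85f, 100, &iters) ;
    GrB_free (&deg) ;
    free (d_out) ;
    return (info) ;
}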
GB_binop__pair_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__pair_uint32) // A.*B function (eWiseMult): GB ((none)) // A.*B function (eWiseMult): GB ((none)) // A.*B function (eWiseMult): GB ((none)) // A.*B function (eWiseMult): GB ((none)) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__pair_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__pair_uint32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pair_uint32) // C=scalar+B GB ((none)) // C=scalar+B' GB ((none)) // C=A+scalar GB ((none)) // C=A'+scalar GB ((none)) // C type: uint32_t // A type: uint32_t // A pattern? 1 // B type: uint32_t // B pattern? 1 // BinaryOp: cij = 1 #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ ; // true if values of A are not used #define GB_A_IS_PATTERN \ 1 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ ; // true if values of B are not used #define GB_B_IS_PATTERN \ 1 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = 1 ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_PAIR || GxB_NO_UINT32 || GxB_NO_PAIR_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__pair_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__pair_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__pair_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__pair_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint32_t alpha_scalar ; uint32_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint32_t *) 
alpha_scalar_in)) ; beta_scalar = (*((uint32_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
QSkycubeG.h
#include <unordered_map> #include <stdint.h> #include <vector> #include "QSkycube.h" #include "BSkyTreeP.h" #include "BSkyTreeEM.h" #pragma once #ifndef _SKYCUBEG_H #define _SKYCUBEG_H bool inverse_snoob(int a, int b) { return __builtin_popcount(a) < __builtin_popcount(b); } void AddMissingSkyPoint(int nSAttID, std::vector<Point>& PointList, std::vector<Point>& Skyline) { nGlobalAtt = nSAttID; sort(Skyline.begin(), Skyline.end(), CompareAtt); std::vector<Point> CPointList; int nPnt = 0, nSky = 0, nNumPnt = (int)PointList.size(), nNumSky = (int)Skyline.size(); while (nPnt < nNumPnt && nSky < nNumSky) { if (PointList[nPnt][nSAttID] < Skyline[nSky][nSAttID]) nPnt++; else if (PointList[nPnt][nSAttID] > Skyline[nSky][nSAttID]) nSky++; else { // Add missing points that have the same values in nSAttID attribute. while (PointList[nPnt][nSAttID] == Skyline[nSky][nSAttID]) { CPointList.push_back(PointList[nPnt++]); if (nPnt == nNumPnt) break; } nSky++; } } Skyline = CPointList; } void AddMissingSkyPointP(int nSAttID, std::vector<Point>& PointList, std::vector<Point>& Skyline) { //nGlobalAtt = nSAttID; sort(Skyline.begin(), Skyline.end(), compareAttStruct(nSAttID)); std::vector<Point> CPointList; int nPnt = 0, nSky = 0, nNumPnt = (int)PointList.size(), nNumSky = (int)Skyline.size(); while (nPnt < nNumPnt && nSky < nNumSky) { if (PointList[nPnt][nSAttID] < Skyline[nSky][nSAttID]) nPnt++; else if (PointList[nPnt][nSAttID] > Skyline[nSky][nSAttID]) nSky++; else { // Add missing points that have the same values in nSAttID attribute. while (PointList[nPnt][nSAttID] == Skyline[nSky][nSAttID]) { CPointList.push_back(PointList[nPnt++]); if (nPnt == nNumPnt) break; } nSky++; } } Skyline = CPointList; } void AddMissingSkyPointP_pointer(int nSAttID, std::vector<int>& PointList, std::vector<int>& Skyline) { //nGlobalAtt = nSAttID; sort(Skyline.begin(), Skyline.end(), compareAttStruct_pointer(nSAttID)); std::vector<int> CPointList; int nPnt = 0, nSky = 0, nNumPnt = (int)PointList.size(), nNumSky = (int)Skyline.size(); while (nPnt < nNumPnt && nSky < nNumSky) { if (GlobalData[PointList[nPnt]][nSAttID] < GlobalData[Skyline[nSky]][nSAttID]) nPnt++; else if (GlobalData[PointList[nPnt]][nSAttID] > GlobalData[Skyline[nSky]][nSAttID]) nSky++; else { // Add missing points that have the same values in nSAttID attribute. while (GlobalData[PointList[nPnt]][nSAttID] == GlobalData[Skyline[nSky]][nSAttID]) { CPointList.push_back(PointList[nPnt++]); if (nPnt == nNumPnt) break; } nSky++; } } Skyline = CPointList; } void AddMissingNonSkyPoint(int nCuboid, std::vector<Point>* EqlPointList, std::vector<Point>& Skyline) { // Append missing points from non-skyline points. for (int nPCuboid = 1; nPCuboid <= nCuboid; nPCuboid++) { if ((nPCuboid & nCuboid) == nPCuboid) { if (!EqlPointList[nPCuboid].empty()) { std::vector<Point>::iterator it = Skyline.end(); Skyline.insert(it, EqlPointList[nPCuboid].begin(), EqlPointList[nPCuboid].end()); } } } } void AddMissingNonSkyPoint_pointer(int nCuboid, std::vector<int>* EqlPointList, std::vector<int>& Skyline) { // Append missing points from non-skyline points. for (int nPCuboid = 1; nPCuboid <= nCuboid; nPCuboid++) { if ((nPCuboid & nCuboid) == nPCuboid) { if (!EqlPointList[nPCuboid].empty()) { std::vector<int>::iterator it = Skyline.end(); Skyline.insert(it, EqlPointList[nPCuboid].begin(), EqlPointList[nPCuboid].end()); } } } } // Extend the QSkycube algorithm using sorted lists (also used in TDS). 
template<int NUM_DIMS> void ExecuteQSkycubeGS(int nNumAtt, std::vector<Point>& PointList, std::vector<Point>* SkyCube) { int nTotalCuboid = (1 << nNumAtt) - 1; std::vector<int>* SubspaceList = new std::vector<int>[nTotalCuboid]; SNode* STreeCube = new SNode[nTotalCuboid]; SetSubspaceList<NUM_DIMS>(nNumAtt, SubspaceList); // Set the attribute ids for each subspace. ExecuteBSkyTree(SubspaceList[0], PointList, STreeCube[0]); InsertSkyline(SkyCube[0], STreeCube[0]); std::vector<vector<Point> > SPointList(nNumAtt + 1); SortPointList(nNumAtt, PointList, SPointList); // Sort PointList on every attribute. int nSAttID; for (int nCuboid = 1; nCuboid < nTotalCuboid; nCuboid++) { std::vector<Point> CPointList = FindSingleParent<NUM_DIMS>(nNumAtt, nCuboid, SubspaceList, STreeCube); // Add missing points from the sorted list (entire dataset). nSAttID = SubspaceList[nCuboid][0]; AddMissingSkyPoint(nSAttID, SPointList[nSAttID], CPointList); ExecuteBSkyTree(SubspaceList[nCuboid], CPointList, STreeCube[nCuboid]); InsertSkyline(SkyCube[nCuboid], STreeCube[nCuboid]); } for (int nCuboid = 0; nCuboid < nTotalCuboid; nCuboid++) ClearSkyTree(STreeCube[nCuboid]); delete[] SubspaceList; delete[] STreeCube; } // Extend the QSkycube algorithm using the equivalence lattice). template<int NUM_DIMS> void ExecuteQSkycubeGL(int nNumAtt, std::vector<Point>& PointList, std::vector<Point>* SkyCube) { int nTotalCuboid = (1 << nNumAtt) - 1; std::vector<int>* SubspaceList = new std::vector<int>[nTotalCuboid]; SNode* STreeCube = new SNode[nTotalCuboid]; SetSubspaceList<NUM_DIMS>(nNumAtt, SubspaceList); // Set the attribute ids for each subspace. // Exploit a lattice to cover points with equal values. std::vector<Point>* EqlPointList = new std::vector<Point>[nTotalCuboid]; ExecuteBSkyTreeEM(SubspaceList[0], PointList, EqlPointList, STreeCube[0]); InsertSkyline(SkyCube[0], STreeCube[0]); std::vector<int> iteration_order; std::vector<vector<Point> > SPointList(nNumAtt + 1); SortPointList(nNumAtt, SkyCube[0], SPointList); // Sort PointList on every single attribute. int nSAttID; unsigned long long nNumTotalPnt = 0; for (int nCuboid = 1; nCuboid < nTotalCuboid; nCuboid++){ std::vector<Point> CPointList = FindSingleParent<NUM_DIMS>(nNumAtt, nCuboid, SubspaceList, STreeCube); // Add missing points from the sorted list (full space skyline). nSAttID = SubspaceList[nCuboid][0]; AddMissingSkyPoint(nSAttID, SPointList[nSAttID], CPointList); // Add missing points from the lattice. AddMissingNonSkyPoint(nCuboid, EqlPointList, CPointList); nNumTotalPnt += (unsigned long long)CPointList.size(); ExecuteBSkyTree(SubspaceList[nCuboid], CPointList, STreeCube[nCuboid]); InsertSkyline(SkyCube[nCuboid], STreeCube[nCuboid]); } nGMeasure = 0; nGMeasure = nNumTotalPnt; for (int nCuboid = 0; nCuboid < nTotalCuboid; nCuboid++) ClearSkyTree(STreeCube[nCuboid]); delete[] EqlPointList; delete[] SubspaceList; delete[] STreeCube; } //with memory usage reduction. template<int NUM_DIMS> void ExecuteQSkycubeGLPMR(int nNumAtt, std::vector<Point>& PointList, std::vector<Point>* SkyCube, int t, int max_d) { int nTotalCuboid = (1 << nNumAtt) - 1; std::vector<int>* SubspaceList = new std::vector<int>[nTotalCuboid]; SNode* STreeCube = new SNode[nTotalCuboid]; SetSubspaceList<NUM_DIMS>(nNumAtt, SubspaceList); // Set the attribute ids for each subspace. // Exploit a lattice to cover points with equal values. 
std::vector<Point>* EqlPointList = new std::vector<Point>[nTotalCuboid]; ExecuteBSkyTreeEM(SubspaceList[0], PointList, EqlPointList, STreeCube[0]); InsertSkyline(SkyCube[0], STreeCube[0]); std::vector<int> iteration_order; for(int i = 1; i < nTotalCuboid; i++){ iteration_order.push_back(i); } std::sort(iteration_order.begin(),iteration_order.end(),inverse_snoob); std::vector<vector<int> > lattice_level_order; int last_popc = 0; for (auto it = iteration_order.begin(); it != iteration_order.end(); ++it) { int next_popc = __builtin_popcount(*it); if (next_popc != last_popc) { std::vector<int> next_list; next_list.push_back(*it); lattice_level_order.push_back(next_list); last_popc = next_popc; } else { lattice_level_order[lattice_level_order.size() - 1].push_back(*it); } } std::vector<vector<Point> > SPointList(nNumAtt + 1); SortPointList(nNumAtt, SkyCube[0], SPointList); // Sort PointList on every single attribute. unsigned long long nNumTotalPnt = 0; int start_level = 0; if(max_d < nNumAtt) { start_level = nNumAtt - 1 - max_d; } for (unsigned int i = start_level; i < lattice_level_order.size(); i++) { if(i > 2+start_level){ //clean up trees no longer used to minimize memory footprint for(auto it = lattice_level_order[i-2].begin(); it != lattice_level_order[i-2].end(); ++it){ ClearSkyTree(STreeCube[*it]); } } if((i == 2+start_level)){ ClearSkyTree(STreeCube[0]); } #pragma omp parallel for schedule(dynamic, 1) num_threads( t ) for (unsigned int j = 0; j < lattice_level_order[i].size(); j++) { int nCuboid = lattice_level_order[i][j]; std::vector<Point> CPointList; //the first level we compute for if(i > start_level) { CPointList = FindSingleParent<NUM_DIMS>(nNumAtt, nCuboid, SubspaceList, STreeCube); } else { //use full skyline for(auto it = SkyCube[0].begin(); it != SkyCube[0].end(); it++) { CPointList.push_back(*it); } } // Add missing points from the sorted list (full space skyline). int nSAttID = SubspaceList[nCuboid][0]; AddMissingSkyPointP(nSAttID, SPointList[nSAttID], CPointList); // Add missing points from the lattice. AddMissingNonSkyPoint(nCuboid, EqlPointList, CPointList); ExecuteBSkyTree(SubspaceList[nCuboid], CPointList, STreeCube[nCuboid]); InsertSkyline(SkyCube[nCuboid], STreeCube[nCuboid]); } } nGMeasure = 0; nGMeasure = nNumTotalPnt; delete[] EqlPointList; delete[] SubspaceList; } #endif
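/*
 * Illustrative sketch (not part of the original QSkycubeG.h): the
 * AddMissingSkyPoint* helpers above all rely on the same two-pointer merge
 * over two lists sorted on a single attribute, keeping exactly the points
 * whose attribute value also occurs in the skyline. The stand-alone C
 * version below shows that scan over plain int arrays; the function name
 * and types are hypothetical.
 */
#if 0
static int MergeEqualValues(const int *all, int nAll,
                            const int *sky, int nSky,
                            int *out) /* out must have room for nAll */
{
  int nPnt = 0, nSkyIdx = 0, nOut = 0;
  while (nPnt < nAll && nSkyIdx < nSky) {
    if (all[nPnt] < sky[nSkyIdx])
      nPnt++;                            /* value absent from the skyline */
    else if (all[nPnt] > sky[nSkyIdx])
      nSkyIdx++;                         /* advance the skyline cursor */
    else {
      /* copy the whole run of equal values, as the inner loops above do */
      while (nPnt < nAll && all[nPnt] == sky[nSkyIdx])
        out[nOut++] = all[nPnt++];
      nSkyIdx++;
    }
  }
  return nOut;                           /* number of points kept */
}
#endif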
omp_detach_taskwait.c
// RUN: %libomp-compile -fopenmp-version=50 && env OMP_NUM_THREADS='3' %libomp-run
// RUN: %libomp-compile -fopenmp-version=50 && env OMP_NUM_THREADS='1' %libomp-run

// As of gcc 10.1, gcc still does not support the detach clause on the
// task construct.
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8, gcc-9, gcc-10
// clang supports the detach clause since version 11.
// UNSUPPORTED: clang-10, clang-9, clang-8, clang-7
// The icc compiler does not support the detach clause.
// UNSUPPORTED: icc

#include <omp.h>

int main() {
#pragma omp parallel
#pragma omp master
  {
    omp_event_handle_t event;
#pragma omp task detach(event)
    {
      omp_fulfill_event(event);
    }
#pragma omp taskwait
  }
  return 0;
}
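/*
 * Hedged sketch (not part of the test above): the point of detach(event)
 * is that a task's completion can be signalled from outside the task body,
 * e.g. by an asynchronous I/O callback. The variant below moves
 * omp_fulfill_event() out of the task to show that; the callback name is
 * hypothetical and stands in for a real asynchronous notification.
 */
#if 0
#include <omp.h>

static omp_event_handle_t g_event;  /* filled in by the detached task */

static void on_async_completion(void) {
  /* fulfilling the event is what lets the taskwait below return */
  omp_fulfill_event(g_event);
}

static void detached_task_pattern(void) {
#pragma omp parallel
#pragma omp master
  {
#pragma omp task detach(g_event)
    {
      /* task body finishes immediately; completion is deferred until
         on_async_completion() fulfills the event */
    }
    on_async_completion();  /* a real program would do this elsewhere */
#pragma omp taskwait
  }
}
#endif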
resample.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % RRRR EEEEE SSSSS AAA M M PPPP L EEEEE % % R R E SS A A MM MM P P L E % % RRRR EEE SSS AAAAA M M M PPPP L EEE % % R R E SS A A M M P L E % % R R EEEEE SSSSS A A M M P LLLLL EEEEE % % % % % % MagickCore Pixel Resampling Methods % % % % Software Design % % Cristy % % Anthony Thyssen % % August 2007 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/color-private.h" #include "MagickCore/cache.h" #include "MagickCore/draw.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/quantum.h" #include "MagickCore/random_.h" #include "MagickCore/resample.h" #include "MagickCore/resize.h" #include "MagickCore/resize-private.h" #include "MagickCore/resource_.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" #include "MagickCore/signature-private.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #include "MagickCore/option.h" /* EWA Resampling Options */ /* select ONE resampling method */ #define EWA 1 /* Normal EWA handling - raw or clamped */ /* if 0 then use "High Quality EWA" */ #define EWA_CLAMP 1 /* EWA Clamping from Nicolas Robidoux */ #define FILTER_LUT 1 /* Use a LUT rather then direct filter calls */ /* output debugging information */ #define DEBUG_ELLIPSE 0 /* output ellipse info for debug */ #define DEBUG_HIT_MISS 0 /* output hit/miss pixels (as gnuplot commands) */ #define DEBUG_NO_PIXEL_HIT 0 /* Make pixels that fail to hit anything - RED */ #if ! FILTER_DIRECT #define WLUT_WIDTH 1024 /* size of the filter cache */ #endif /* Typedef declarations. 
*/ struct _ResampleFilter { CacheView *view; Image *image; ExceptionInfo *exception; MagickBooleanType debug; /* Information about image being resampled */ ssize_t image_area; PixelInterpolateMethod interpolate; VirtualPixelMethod virtual_pixel; FilterType filter; /* processing settings needed */ MagickBooleanType limit_reached, do_interpolate, average_defined; PixelInfo average_pixel; /* current ellipitical area being resampled around center point */ double A, B, C, Vlimit, Ulimit, Uwidth, slope; #if FILTER_LUT /* LUT of weights for filtered average in elliptical area */ double filter_lut[WLUT_WIDTH]; #else /* Use a Direct call to the filter functions */ ResizeFilter *filter_def; double F; #endif /* the practical working support of the filter */ double support; size_t signature; }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e R e s a m p l e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireResampleFilter() initializes the information resample needs do to a % scaled lookup of a color from an image, using area sampling. % % The algorithm is based on a Elliptical Weighted Average, where the pixels % found in a large elliptical area is averaged together according to a % weighting (filter) function. For more details see "Fundamentals of Texture % Mapping and Image Warping" a master's thesis by Paul.S.Heckbert, June 17, % 1989. Available for free from, http://www.cs.cmu.edu/~ph/ % % As EWA resampling (or any sort of resampling) can require a lot of % calculations to produce a distorted scaling of the source image for each % output pixel, the ResampleFilter structure generated holds that information % between individual image resampling. % % This function will make the appropriate AcquireCacheView() calls % to view the image, calling functions do not need to open a cache view. % % Usage Example... % resample_filter=AcquireResampleFilter(image,exception); % SetResampleFilter(resample_filter, GaussianFilter); % for (y=0; y < (ssize_t) image->rows; y++) { % for (x=0; x < (ssize_t) image->columns; x++) { % u= ....; v= ....; % ScaleResampleFilter(resample_filter, ... scaling vectors ...); % (void) ResamplePixelColor(resample_filter,u,v,&pixel); % ... assign resampled pixel value ... % } % } % DestroyResampleFilter(resample_filter); % % The format of the AcquireResampleFilter method is: % % ResampleFilter *AcquireResampleFilter(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport ResampleFilter *AcquireResampleFilter(const Image *image, ExceptionInfo *exception) { ResampleFilter *resample_filter; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); resample_filter=(ResampleFilter *) AcquireCriticalMemory(sizeof( *resample_filter)); (void) memset(resample_filter,0,sizeof(*resample_filter)); resample_filter->exception=exception; resample_filter->image=ReferenceImage((Image *) image); resample_filter->view=AcquireVirtualCacheView(resample_filter->image, exception); resample_filter->debug=IsEventLogging(); resample_filter->image_area=(ssize_t) (image->columns*image->rows); resample_filter->average_defined=MagickFalse; resample_filter->signature=MagickCoreSignature; SetResampleFilter(resample_filter,image->filter); (void) SetResampleFilterInterpolateMethod(resample_filter,image->interpolate); (void) SetResampleFilterVirtualPixelMethod(resample_filter, GetImageVirtualPixelMethod(image)); return(resample_filter); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y R e s a m p l e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyResampleFilter() finalizes and cleans up the resampling % resample_filter as returned by AcquireResampleFilter(), freeing any memory % or other information as needed. % % The format of the DestroyResampleFilter method is: % % ResampleFilter *DestroyResampleFilter(ResampleFilter *resample_filter) % % A description of each parameter follows: % % o resample_filter: resampling information structure % */ MagickExport ResampleFilter *DestroyResampleFilter( ResampleFilter *resample_filter) { assert(resample_filter != (ResampleFilter *) NULL); assert(resample_filter->signature == MagickCoreSignature); assert(resample_filter->image != (Image *) NULL); if (resample_filter->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", resample_filter->image->filename); resample_filter->view=DestroyCacheView(resample_filter->view); resample_filter->image=DestroyImage(resample_filter->image); #if ! FILTER_LUT resample_filter->filter_def=DestroyResizeFilter(resample_filter->filter_def); #endif resample_filter->signature=(~MagickCoreSignature); resample_filter=(ResampleFilter *) RelinquishMagickMemory(resample_filter); return(resample_filter); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s a m p l e P i x e l C o l o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResamplePixelColor() samples the pixel values surrounding the location % given using an elliptical weighted average, at the scale previously % calculated, and in the most efficent manner possible for the % VirtualPixelMethod setting. % % The format of the ResamplePixelColor method is: % % MagickBooleanType ResamplePixelColor(ResampleFilter *resample_filter, % const double u0,const double v0,PixelInfo *pixel, % ExceptionInfo *exception) % % A description of each parameter follows: % % o resample_filter: the resample filter. % % o u0,v0: A double representing the center of the area to resample, % The distortion transformed transformed x,y coordinate. % % o pixel: the resampled pixel is returned here. 
% % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ResamplePixelColor( ResampleFilter *resample_filter,const double u0,const double v0, PixelInfo *pixel,ExceptionInfo *exception) { MagickBooleanType status; ssize_t u,v, v1, v2, uw, hit; double u1; double U,V,Q,DQ,DDQ; double divisor_c,divisor_m; double weight; const Quantum *pixels; assert(resample_filter != (ResampleFilter *) NULL); assert(resample_filter->signature == MagickCoreSignature); status=MagickTrue; /* GetPixelInfo(resample_filter->image,pixel); */ if ( resample_filter->do_interpolate ) { status=InterpolatePixelInfo(resample_filter->image,resample_filter->view, resample_filter->interpolate,u0,v0,pixel,resample_filter->exception); return(status); } #if DEBUG_ELLIPSE (void) FormatLocaleFile(stderr, "u0=%lf; v0=%lf;\n", u0, v0); #endif /* Does resample area Miss the image Proper? If and that area a simple solid color - then simply return that color! This saves a lot of calculation when resampling outside the bounds of the source image. However it probably should be expanded to image bounds plus the filters scaled support size. */ hit = 0; switch ( resample_filter->virtual_pixel ) { case BackgroundVirtualPixelMethod: case TransparentVirtualPixelMethod: case BlackVirtualPixelMethod: case GrayVirtualPixelMethod: case WhiteVirtualPixelMethod: case MaskVirtualPixelMethod: if ( resample_filter->limit_reached || u0 + resample_filter->Ulimit < 0.0 || u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0 || v0 + resample_filter->Vlimit < 0.0 || v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0 ) hit++; break; case UndefinedVirtualPixelMethod: case EdgeVirtualPixelMethod: if ( ( u0 + resample_filter->Ulimit < 0.0 && v0 + resample_filter->Vlimit < 0.0 ) || ( u0 + resample_filter->Ulimit < 0.0 && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0 ) || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0 && v0 + resample_filter->Vlimit < 0.0 ) || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0 && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0 ) ) hit++; break; case HorizontalTileVirtualPixelMethod: if ( v0 + resample_filter->Vlimit < 0.0 || v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0 ) hit++; /* outside the horizontally tiled images. */ break; case VerticalTileVirtualPixelMethod: if ( u0 + resample_filter->Ulimit < 0.0 || u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0 ) hit++; /* outside the vertically tiled images. 
*/ break; case DitherVirtualPixelMethod: if ( ( u0 + resample_filter->Ulimit < -32.0 && v0 + resample_filter->Vlimit < -32.0 ) || ( u0 + resample_filter->Ulimit < -32.0 && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+31.0 ) || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+31.0 && v0 + resample_filter->Vlimit < -32.0 ) || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+31.0 && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+31.0 ) ) hit++; break; case TileVirtualPixelMethod: case MirrorVirtualPixelMethod: case RandomVirtualPixelMethod: case HorizontalTileEdgeVirtualPixelMethod: case VerticalTileEdgeVirtualPixelMethod: case CheckerTileVirtualPixelMethod: /* resampling of area is always needed - no VP limits */ break; } if ( hit ) { /* The area being resampled is simply a solid color * just return a single lookup color. * * Should this return the users requested interpolated color? */ status=InterpolatePixelInfo(resample_filter->image,resample_filter->view, IntegerInterpolatePixel,u0,v0,pixel,resample_filter->exception); return(status); } /* When Scaling limits reached, return an 'averaged' result. */ if ( resample_filter->limit_reached ) { switch ( resample_filter->virtual_pixel ) { /* This is always handled by the above, so no need. case BackgroundVirtualPixelMethod: case ConstantVirtualPixelMethod: case TransparentVirtualPixelMethod: case GrayVirtualPixelMethod, case WhiteVirtualPixelMethod case MaskVirtualPixelMethod: */ case UndefinedVirtualPixelMethod: case EdgeVirtualPixelMethod: case DitherVirtualPixelMethod: case HorizontalTileEdgeVirtualPixelMethod: case VerticalTileEdgeVirtualPixelMethod: /* We need an average edge pixel, from the correct edge! How should I calculate an average edge color? Just returning an averaged neighbourhood, works well in general, but falls down for TileEdge methods. This needs to be done properly!!!!!! */ status=InterpolatePixelInfo(resample_filter->image, resample_filter->view,AverageInterpolatePixel,u0,v0,pixel, resample_filter->exception); break; case HorizontalTileVirtualPixelMethod: case VerticalTileVirtualPixelMethod: /* just return the background pixel - Is there more direct way? 
*/ status=InterpolatePixelInfo(resample_filter->image, resample_filter->view,IntegerInterpolatePixel,-1.0,-1.0,pixel, resample_filter->exception); break; case TileVirtualPixelMethod: case MirrorVirtualPixelMethod: case RandomVirtualPixelMethod: case CheckerTileVirtualPixelMethod: default: /* generate a average color of the WHOLE image */ if ( resample_filter->average_defined == MagickFalse ) { Image *average_image; CacheView *average_view; GetPixelInfo(resample_filter->image,(PixelInfo *) &resample_filter->average_pixel); resample_filter->average_defined=MagickTrue; /* Try to get an averaged pixel color of whole image */ average_image=ResizeImage(resample_filter->image,1,1,BoxFilter, resample_filter->exception); if (average_image == (Image *) NULL) { *pixel=resample_filter->average_pixel; /* FAILED */ break; } average_view=AcquireVirtualCacheView(average_image,exception); pixels=GetCacheViewVirtualPixels(average_view,0,0,1,1, resample_filter->exception); if (pixels == (const Quantum *) NULL) { average_view=DestroyCacheView(average_view); average_image=DestroyImage(average_image); *pixel=resample_filter->average_pixel; /* FAILED */ break; } GetPixelInfoPixel(resample_filter->image,pixels, &(resample_filter->average_pixel)); average_view=DestroyCacheView(average_view); average_image=DestroyImage(average_image); if ( resample_filter->virtual_pixel == CheckerTileVirtualPixelMethod ) { /* CheckerTile is a alpha blend of the image's average pixel color and the current background color */ /* image's average pixel color */ weight = QuantumScale*((double) resample_filter->average_pixel.alpha); resample_filter->average_pixel.red *= weight; resample_filter->average_pixel.green *= weight; resample_filter->average_pixel.blue *= weight; divisor_c = weight; /* background color */ weight = QuantumScale*((double) resample_filter->image->background_color.alpha); resample_filter->average_pixel.red += weight*resample_filter->image->background_color.red; resample_filter->average_pixel.green += weight*resample_filter->image->background_color.green; resample_filter->average_pixel.blue += weight*resample_filter->image->background_color.blue; resample_filter->average_pixel.alpha += resample_filter->image->background_color.alpha; divisor_c += weight; /* alpha blend */ resample_filter->average_pixel.red /= divisor_c; resample_filter->average_pixel.green /= divisor_c; resample_filter->average_pixel.blue /= divisor_c; resample_filter->average_pixel.alpha /= 2; /* 50% blend */ } } *pixel=resample_filter->average_pixel; break; } return(status); } /* Initialize weighted average data collection */ hit = 0; divisor_c = 0.0; divisor_m = 0.0; pixel->red = pixel->green = pixel->blue = 0.0; if (pixel->colorspace == CMYKColorspace) pixel->black = 0.0; if (pixel->alpha_trait != UndefinedPixelTrait) pixel->alpha = 0.0; /* Determine the parellelogram bounding box fitted to the ellipse centered at u0,v0. This area is bounding by the lines... 
*/ v1 = (ssize_t)ceil(v0 - resample_filter->Vlimit); /* range of scan lines */ v2 = (ssize_t)floor(v0 + resample_filter->Vlimit); /* scan line start and width accross the parallelogram */ u1 = u0 + (v1-v0)*resample_filter->slope - resample_filter->Uwidth; uw = (ssize_t)(2.0*resample_filter->Uwidth)+1; #if DEBUG_ELLIPSE (void) FormatLocaleFile(stderr, "v1=%ld; v2=%ld\n", (long)v1, (long)v2); (void) FormatLocaleFile(stderr, "u1=%ld; uw=%ld\n", (long)u1, (long)uw); #else # define DEBUG_HIT_MISS 0 /* only valid if DEBUG_ELLIPSE is enabled */ #endif /* Do weighted resampling of all pixels, within the scaled ellipse, bound by a Parellelogram fitted to the ellipse. */ DDQ = 2*resample_filter->A; for( v=v1; v<=v2; v++ ) { #if DEBUG_HIT_MISS long uu = ceil(u1); /* actual pixel location (for debug only) */ (void) FormatLocaleFile(stderr, "# scan line from pixel %ld, %ld\n", (long)uu, (long)v); #endif u = (ssize_t)ceil(u1); /* first pixel in scanline */ u1 += resample_filter->slope; /* start of next scan line */ /* location of this first pixel, relative to u0,v0 */ U = (double)u-u0; V = (double)v-v0; /* Q = ellipse quotent ( if Q<F then pixel is inside ellipse) */ Q = (resample_filter->A*U + resample_filter->B*V)*U + resample_filter->C*V*V; DQ = resample_filter->A*(2.0*U+1) + resample_filter->B*V; /* get the scanline of pixels for this v */ pixels=GetCacheViewVirtualPixels(resample_filter->view,u,v,(size_t) uw, 1,resample_filter->exception); if (pixels == (const Quantum *) NULL) return(MagickFalse); /* count up the weighted pixel colors */ for( u=0; u<uw; u++ ) { #if FILTER_LUT /* Note that the ellipse has been pre-scaled so F = WLUT_WIDTH */ if ( Q < (double)WLUT_WIDTH ) { weight = resample_filter->filter_lut[(int)Q]; #else /* Note that the ellipse has been pre-scaled so F = support^2 */ if ( Q < (double)resample_filter->F ) { weight = GetResizeFilterWeight(resample_filter->filter_def, sqrt(Q)); /* a SquareRoot! Arrggghhhhh... 
*/ #endif pixel->alpha += weight*GetPixelAlpha(resample_filter->image,pixels); divisor_m += weight; if (pixel->alpha_trait != UndefinedPixelTrait) weight *= QuantumScale*((double) GetPixelAlpha(resample_filter->image,pixels)); pixel->red += weight*GetPixelRed(resample_filter->image,pixels); pixel->green += weight*GetPixelGreen(resample_filter->image,pixels); pixel->blue += weight*GetPixelBlue(resample_filter->image,pixels); if (pixel->colorspace == CMYKColorspace) pixel->black += weight*GetPixelBlack(resample_filter->image,pixels); divisor_c += weight; hit++; #if DEBUG_HIT_MISS /* mark the pixel according to hit/miss of the ellipse */ (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n", (long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1); (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n", (long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1); } else { (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n", (long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1); (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n", (long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1); } uu++; #else } #endif pixels+=GetPixelChannels(resample_filter->image); Q += DQ; DQ += DDQ; } } #if DEBUG_ELLIPSE (void) FormatLocaleFile(stderr, "Hit=%ld; Total=%ld;\n", (long)hit, (long)uw*(v2-v1) ); #endif /* Result sanity check -- this should NOT happen */ if ( hit == 0 || divisor_m <= MagickEpsilon || divisor_c <= MagickEpsilon ) { /* not enough pixels, or bad weighting in resampling, resort to direct interpolation */ #if DEBUG_NO_PIXEL_HIT pixel->alpha = pixel->red = pixel->green = pixel->blue = 0; pixel->red = QuantumRange; /* show pixels for which EWA fails */ #else status=InterpolatePixelInfo(resample_filter->image, resample_filter->view,resample_filter->interpolate,u0,v0,pixel, resample_filter->exception); #endif return status; } /* Finialize results of resampling */ divisor_m = 1.0/divisor_m; if (pixel->alpha_trait != UndefinedPixelTrait) pixel->alpha = (double) ClampToQuantum(divisor_m*pixel->alpha); divisor_c = 1.0/divisor_c; pixel->red = (double) ClampToQuantum(divisor_c*pixel->red); pixel->green = (double) ClampToQuantum(divisor_c*pixel->green); pixel->blue = (double) ClampToQuantum(divisor_c*pixel->blue); if (pixel->colorspace == CMYKColorspace) pixel->black = (double) ClampToQuantum(divisor_c*pixel->black); return(MagickTrue); } #if EWA && EWA_CLAMP /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % - C l a m p U p A x e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClampUpAxes() function converts the input vectors into a major and % minor axis unit vectors, and their magnitude. This allows us to % ensure that the ellipse generated is never smaller than the unit % circle and thus never too small for use in EWA resampling. % % This purely mathematical 'magic' was provided by Professor Nicolas % Robidoux and his Masters student Chantal Racette. % % Reference: "We Recommend Singular Value Decomposition", David Austin % http://www.ams.org/samplings/feature-column/fcarc-svd % % By generating major and minor axis vectors, we can actually use the % ellipse in its "canonical form", by remapping the dx,dy of the % sampled point into distances along the major and minor axis unit % vectors. 
% % Reference: http://en.wikipedia.org/wiki/Ellipse#Canonical_form */ static inline void ClampUpAxes(const double dux, const double dvx, const double duy, const double dvy, double *major_mag, double *minor_mag, double *major_unit_x, double *major_unit_y, double *minor_unit_x, double *minor_unit_y) { /* * ClampUpAxes takes an input 2x2 matrix * * [ a b ] = [ dux duy ] * [ c d ] = [ dvx dvy ] * * and computes from it the major and minor axis vectors [major_x, * major_y] and [minor_x,minor_y] of the smallest ellipse containing * both the unit disk and the ellipse which is the image of the unit * disk by the linear transformation * * [ dux duy ] [S] = [s] * [ dvx dvy ] [T] = [t] * * (The vector [S,T] is the difference between a position in output * space and [X,Y]; the vector [s,t] is the difference between a * position in input space and [x,y].) */ /* * Output: * * major_mag is the half-length of the major axis of the "new" * ellipse. * * minor_mag is the half-length of the minor axis of the "new" * ellipse. * * major_unit_x is the x-coordinate of the major axis direction vector * of both the "old" and "new" ellipses. * * major_unit_y is the y-coordinate of the major axis direction vector. * * minor_unit_x is the x-coordinate of the minor axis direction vector. * * minor_unit_y is the y-coordinate of the minor axis direction vector. * * Unit vectors are useful for computing projections, in particular, * to compute the distance between a point in output space and the * center of a unit disk in output space, using the position of the * corresponding point [s,t] in input space. Following the clamping, * the square of this distance is * * ( ( s * major_unit_x + t * major_unit_y ) / major_mag )^2 * + * ( ( s * minor_unit_x + t * minor_unit_y ) / minor_mag )^2 * * If such distances will be computed for many [s,t]'s, it makes * sense to actually compute the reciprocal of major_mag and * minor_mag and multiply them by the above unit lengths. * * Now, if you want to modify the input pair of tangent vectors so * that it defines the modified ellipse, all you have to do is set * * newdux = major_mag * major_unit_x * newdvx = major_mag * major_unit_y * newduy = minor_mag * minor_unit_x = minor_mag * -major_unit_y * newdvy = minor_mag * minor_unit_y = minor_mag * major_unit_x * * and use these tangent vectors as if they were the original ones. * Usually, this is a drastic change in the tangent vectors even if * the singular values are not clamped; for example, the minor axis * vector always points in a direction which is 90 degrees * counterclockwise from the direction of the major axis vector. */ /* * Discussion: * * GOAL: Fix things so that the pullback, in input space, of a disk * of radius r in output space is an ellipse which contains, at * least, a disc of radius r. (Make this hold for any r>0.) * * ESSENCE OF THE METHOD: Compute the product of the first two * factors of an SVD of the linear transformation defining the * ellipse and make sure that both its columns have norm at least 1. * Because rotations and reflexions map disks to themselves, it is * not necessary to compute the third (rightmost) factor of the SVD. * * DETAILS: Find the singular values and (unit) left singular * vectors of Jinv, clampling up the singular values to 1, and * multiply the unit left singular vectors by the new singular * values in order to get the minor and major ellipse axis vectors. 
* * Image resampling context: * * The Jacobian matrix of the transformation at the output point * under consideration is defined as follows: * * Consider the transformation (x,y) -> (X,Y) from input locations * to output locations. (Anthony Thyssen, elsewhere in resample.c, * uses the notation (u,v) -> (x,y).) * * The Jacobian matrix of the transformation at (x,y) is equal to * * J = [ A, B ] = [ dX/dx, dX/dy ] * [ C, D ] [ dY/dx, dY/dy ] * * that is, the vector [A,C] is the tangent vector corresponding to * input changes in the horizontal direction, and the vector [B,D] * is the tangent vector corresponding to input changes in the * vertical direction. * * In the context of resampling, it is natural to use the inverse * Jacobian matrix Jinv because resampling is generally performed by * pulling pixel locations in the output image back to locations in * the input image. Jinv is * * Jinv = [ a, b ] = [ dx/dX, dx/dY ] * [ c, d ] [ dy/dX, dy/dY ] * * Note: Jinv can be computed from J with the following matrix * formula: * * Jinv = 1/(A*D-B*C) [ D, -B ] * [ -C, A ] * * What we do is modify Jinv so that it generates an ellipse which * is as close as possible to the original but which contains the * unit disk. This can be accomplished as follows: * * Let * * Jinv = U Sigma V^T * * be an SVD decomposition of Jinv. (The SVD is not unique, but the * final ellipse does not depend on the particular SVD.) * * We could clamp up the entries of the diagonal matrix Sigma so * that they are at least 1, and then set * * Jinv = U newSigma V^T. * * However, we do not need to compute V for the following reason: * V^T is an orthogonal matrix (that is, it represents a combination * of rotations and reflexions) so that it maps the unit circle to * itself. For this reason, the exact value of V does not affect the * final ellipse, and we can choose V to be the identity * matrix. This gives * * Jinv = U newSigma. * * In the end, we return the two diagonal entries of newSigma * together with the two columns of U. */ /* * ClampUpAxes was written by Nicolas Robidoux and Chantal Racette * of Laurentian University with insightful suggestions from Anthony * Thyssen and funding from the National Science and Engineering * Research Council of Canada. It is distinguished from its * predecessors by its efficient handling of degenerate cases. * * The idea of clamping up the EWA ellipse's major and minor axes so * that the result contains the reconstruction kernel filter support * is taken from Andreas Gustaffson's Masters thesis "Interactive * Image Warping", Helsinki University of Technology, Faculty of * Information Technology, 59 pages, 1993 (see Section 3.6). * * The use of the SVD to clamp up the singular values of the * Jacobian matrix of the pullback transformation for EWA resampling * is taken from the astrophysicist Craig DeForest. It is * implemented in his PDL::Transform code (PDL = Perl Data * Language). */ const double a = dux; const double b = duy; const double c = dvx; const double d = dvy; /* * n is the matrix Jinv * transpose(Jinv). Eigenvalues of n are the * squares of the singular values of Jinv. */ const double aa = a*a; const double bb = b*b; const double cc = c*c; const double dd = d*d; /* * Eigenvectors of n are left singular vectors of Jinv. 
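 *
 * Worked example (added for illustration; not part of the original
 * comment): take Jinv = [ 2 0 ; 0 0.5 ], i.e. a 2x minification
 * horizontally and a 2x magnification vertically. Then
 * n = Jinv * transpose(Jinv) = [ 4 0 ; 0 0.25 ], frobenius_squared = 4.25,
 * det = 1, so discriminant = (4.25+2)*(4.25-2) = 14.0625 and
 * sqrt_discriminant = 3.75. Hence s1s1 = 4 and s2s2 = 0.25: the major
 * half-axis stays at sqrt(4) = 2, the minor half-axis 0.5 is clamped up
 * to 1, and the major axis direction comes out as [1,0]. The unit disk
 * is grown only in the direction that was being magnified, exactly as
 * intended.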
*/ const double n11 = aa+bb; const double n12 = a*c+b*d; const double n21 = n12; const double n22 = cc+dd; const double det = a*d-b*c; const double twice_det = det+det; const double frobenius_squared = n11+n22; const double discriminant = (frobenius_squared+twice_det)*(frobenius_squared-twice_det); /* * In exact arithmetic, discriminant can't be negative. In floating * point, it can, because of the bad conditioning of SVD * decompositions done through the associated normal matrix. */ const double sqrt_discriminant = sqrt(discriminant > 0.0 ? discriminant : 0.0); /* * s1 is the largest singular value of the inverse Jacobian * matrix. In other words, its reciprocal is the smallest singular * value of the Jacobian matrix itself. * If s1 = 0, both singular values are 0, and any orthogonal pair of * left and right factors produces a singular decomposition of Jinv. */ /* * Initially, we only compute the squares of the singular values. */ const double s1s1 = 0.5*(frobenius_squared+sqrt_discriminant); /* * s2 the smallest singular value of the inverse Jacobian * matrix. Its reciprocal is the largest singular value of the * Jacobian matrix itself. */ const double s2s2 = 0.5*(frobenius_squared-sqrt_discriminant); const double s1s1minusn11 = s1s1-n11; const double s1s1minusn22 = s1s1-n22; /* * u1, the first column of the U factor of a singular decomposition * of Jinv, is a (non-normalized) left singular vector corresponding * to s1. It has entries u11 and u21. We compute u1 from the fact * that it is an eigenvector of n corresponding to the eigenvalue * s1^2. */ const double s1s1minusn11_squared = s1s1minusn11*s1s1minusn11; const double s1s1minusn22_squared = s1s1minusn22*s1s1minusn22; /* * The following selects the largest row of n-s1^2 I as the one * which is used to find the eigenvector. If both s1^2-n11 and * s1^2-n22 are zero, n-s1^2 I is the zero matrix. In that case, * any vector is an eigenvector; in addition, norm below is equal to * zero, and, in exact arithmetic, this is the only case in which * norm = 0. So, setting u1 to the simple but arbitrary vector [1,0] * if norm = 0 safely takes care of all cases. */ const double temp_u11 = ( (s1s1minusn11_squared>=s1s1minusn22_squared) ? n12 : s1s1minusn22 ); const double temp_u21 = ( (s1s1minusn11_squared>=s1s1minusn22_squared) ? s1s1minusn11 : n21 ); const double norm = sqrt(temp_u11*temp_u11+temp_u21*temp_u21); /* * Finalize the entries of first left singular vector (associated * with the largest singular value). */ const double u11 = ( (norm>0.0) ? temp_u11/norm : 1.0 ); const double u21 = ( (norm>0.0) ? temp_u21/norm : 0.0 ); /* * Clamp the singular values up to 1. */ *major_mag = ( (s1s1<=1.0) ? 1.0 : sqrt(s1s1) ); *minor_mag = ( (s2s2<=1.0) ? 1.0 : sqrt(s2s2) ); /* * Return the unit major and minor axis direction vectors. */ *major_unit_x = u11; *major_unit_y = u21; *minor_unit_x = -u21; *minor_unit_y = u11; } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S c a l e R e s a m p l e F i l t e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ScaleResampleFilter() does all the calculations needed to resample an image % at a specific scale, defined by two scaling vectors. This not using % a orthogonal scaling, but two distorted scaling vectors, to allow the % generation of a angled ellipse. % % As only two deritive scaling vectors are used the center of the ellipse % must be the center of the lookup. 
That is, any curvature that the
% distortion may produce is discounted.
%
% The input vectors are produced by either finding the derivatives of the
% distortion function, or the partial derivatives from a distortion mapping.
% They do not need to be the orthogonal dx,dy scaling vectors, but can be
% calculated from other derivatives. For example, you could use dr,da/r
% polar coordinate scaling vectors.
%
% If u,v = DistortEquation(x,y)   OR   u = Fu(x,y); v = Fv(x,y)
% Then the scaling vectors are determined from the derivatives...
%     du/dx, dv/dx   and   du/dy, dv/dy
% If the resulting scaling vectors are orthogonally aligned then...
%     dv/dx = 0 and du/dy = 0
% Producing an orthogonally aligned ellipse in source space for the area to
% be resampled.
%
% Note that the argument order differs from the scaling vector order. The
% arguments follow the general order in which the derivatives are extracted
% from the distortion equations, not the scaling vectors. As such the middle
% two values may be swapped from what you expect. Caution is advised.
%
% WARNING: It is assumed that any SetResampleFilter() method call will
% always be performed before the ScaleResampleFilter() method, so that the
% size of the ellipse will match the support for the resampling filter being
% used.
%
% The format of the ScaleResampleFilter method is:
%
%     void ScaleResampleFilter(const ResampleFilter *resample_filter,
%       const double dux,const double duy,const double dvx,const double dvy)
%
% A description of each parameter follows:
%
%   o resample_filter: the resampling information defining the image
%     being resampled
%
%   o dux,duy,dvx,dvy:
%     The derivatives or scaling vectors defining the EWA ellipse.
%     NOTE: watch the order, which is based on the order derivatives
%     are usually determined from distortion equations (see above).
%     The middle two values may need to be swapped if you are thinking
%     in terms of scaling vectors.
%
*/
MagickExport void ScaleResampleFilter(ResampleFilter *resample_filter,
  const double dux,const double duy,const double dvx,const double dvy)
{
  double A,B,C,F;

  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);

  resample_filter->limit_reached = MagickFalse;

  /* A 'point' filter forces use of interpolation instead of area sampling */
  if ( resample_filter->filter == PointFilter )
    return; /* EWA turned off - nothing to do */

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "# -----\n" );
  (void) FormatLocaleFile(stderr, "dux=%lf; dvx=%lf;   duy=%lf; dvy=%lf;\n",
    dux, dvx, duy, dvy);
#endif

  /* Find Ellipse Coefficients such that
       A*u^2 + B*u*v + C*v^2 = F
     With u,v relative to point around which we are resampling.
     And the given scaling dx,dy vectors in u,v space
       du/dx,dv/dx   and   du/dy,dv/dy
  */
#if EWA
  /* Direct conversion of derivatives into elliptical coefficients.
     However when magnifying images, the scaling vectors will be small,
     resulting in an ellipse that is too small to sample properly.
     As such we need to clamp the major/minor axis to a minimum of 1.0
     to prevent it getting too small.
  */
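  /*
    Worked example (added for clarity; not upstream text): for a pure
    axis-aligned scaling dux = dvy = s, duy = dvx = 0 the raw EWA
    coefficients below reduce to
        A = s^2,  B = 0,  C = s^2,  F = s^4
    and after F is later multiplied by support^2 the bounds become
        Ulimit = Vlimit = s*support
    i.e. a minification by factor s samples a circle of radius s*support
    in the source image, as expected.
  */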
#if EWA_CLAMP
  { double major_mag, minor_mag,
           major_x, major_y,
           minor_x, minor_y;

    ClampUpAxes(dux,dvx,duy,dvy, &major_mag, &minor_mag,
                &major_x, &major_y, &minor_x, &minor_y);
    major_x *= major_mag;  major_y *= major_mag;
    minor_x *= minor_mag;  minor_y *= minor_mag;
#if DEBUG_ELLIPSE
    (void) FormatLocaleFile(stderr,
      "major_x=%lf; major_y=%lf;  minor_x=%lf; minor_y=%lf;\n",
      major_x, major_y, minor_x, minor_y);
#endif
    A = major_y*major_y+minor_y*minor_y;
    B = -2.0*(major_x*major_y+minor_x*minor_y);
    C = major_x*major_x+minor_x*minor_x;
    F = major_mag*minor_mag;
    F *= F; /* square it */
  }
#else /* raw unclamped EWA */
  A = dvx*dvx+dvy*dvy;
  B = -2.0*(dux*dvx+duy*dvy);
  C = dux*dux+duy*duy;
  F = dux*dvy-duy*dvx;
  F *= F; /* square it */
#endif /* EWA_CLAMP */

#else /* HQ_EWA */
  /*
    This is Paul Heckbert's "Higher Quality EWA" formula, from page 60 in
    his thesis, which adds a unit circle to the elliptical area so as to do
    both Reconstruction and Prefiltering of the pixels in the resampling.
    It also means it is always likely to have at least 4 pixels within the
    area of the ellipse, for weighted averaging. No scaling will result
    with F == 4.0 and a circle of radius 2.0, and F smaller than this means
    magnification is being used.

    NOTE: This method produces a very blurry result at near unity scale
    while producing perfect results for strong minification and
    magnifications.

    However filter support is fixed to 2.0 (no good for Windowed Sinc
    filters)
  */
  A = dvx*dvx+dvy*dvy+1;
  B = -2.0*(dux*dvx+duy*dvy);
  C = dux*dux+duy*duy+1;
  F = A*C - B*B/4;
#endif

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "A=%lf; B=%lf; C=%lf; F=%lf\n", A,B,C,F);

  /* Figure out the various information directly about the ellipse.
     This information currently not needed at this time, but may be
     needed later for better limit determination.

     It is also good to have as a record for future debugging
  */
  { double alpha, beta, gamma, Major, Minor;
    double Eccentricity, Ellipse_Area, Ellipse_Angle;

    alpha = A+C;
    beta  = A-C;
    gamma = sqrt(beta*beta + B*B );

    if ( alpha - gamma <= MagickEpsilon )
      Major=MagickMaximumValue;
    else
      Major=sqrt(2*F/(alpha - gamma));
    Minor = sqrt(2*F/(alpha + gamma));

    (void) FormatLocaleFile(stderr, "# Major=%lf; Minor=%lf\n", Major, Minor );

    /* other information about ellipse include... */
    Eccentricity = Major/Minor;
    Ellipse_Area = MagickPI*Major*Minor;
    Ellipse_Angle = atan2(B, A-C);

    (void) FormatLocaleFile(stderr, "# Angle=%lf   Area=%lf\n",
      (double) RadiansToDegrees(Ellipse_Angle), Ellipse_Area);
  }
#endif

  /* If one or both of the scaling vectors is impossibly large (producing a
     very large raw F value), we may as well not bother doing any form of
     resampling since resampled area is very large. In this case some
     alternative means of pixel sampling, such as the average of the whole
     image, is needed to get a reasonable result. Calculate only as needed.
  */
  if ( (4*A*C - B*B) > MagickMaximumValue ) {
    resample_filter->limit_reached = MagickTrue;
    return;
  }

  /* Scale ellipse to match the filter's support
     (that is, multiply F by the square of the support).
     Simpler to just multiply it by the support twice!
*/ F *= resample_filter->support; F *= resample_filter->support; /* Orthogonal bounds of the ellipse */ resample_filter->Ulimit = sqrt(C*F/(A*C-0.25*B*B)); resample_filter->Vlimit = sqrt(A*F/(A*C-0.25*B*B)); /* Horizontally aligned parallelogram fitted to Ellipse */ resample_filter->Uwidth = sqrt(F/A); /* Half of the parallelogram width */ resample_filter->slope = -B/(2.0*A); /* Reciprocal slope of the parallelogram */ #if DEBUG_ELLIPSE (void) FormatLocaleFile(stderr, "Ulimit=%lf; Vlimit=%lf; UWidth=%lf; Slope=%lf;\n", resample_filter->Ulimit, resample_filter->Vlimit, resample_filter->Uwidth, resample_filter->slope ); #endif /* Check the absolute area of the parallelogram involved. * This limit needs more work, as it is too slow for larger images * with tiled views of the horizon. */ if ( (resample_filter->Uwidth * resample_filter->Vlimit) > (4.0*resample_filter->image_area)) { resample_filter->limit_reached = MagickTrue; return; } /* Scale ellipse formula to directly index the Filter Lookup Table */ { double scale; #if FILTER_LUT /* scale so that F = WLUT_WIDTH; -- hardcoded */ scale = (double)WLUT_WIDTH/F; #else /* scale so that F = resample_filter->F (support^2) */ scale = resample_filter->F/F; #endif resample_filter->A = A*scale; resample_filter->B = B*scale; resample_filter->C = C*scale; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t R e s a m p l e F i l t e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetResampleFilter() set the resampling filter lookup table based on a % specific filter. Note that the filter is used as a radial filter not as a % two pass othogonally aligned resampling filter. % % The format of the SetResampleFilter method is: % % void SetResampleFilter(ResampleFilter *resample_filter, % const FilterType filter) % % A description of each parameter follows: % % o resample_filter: resampling resample_filterrmation structure % % o filter: the resize filter for elliptical weighting LUT % */ MagickExport void SetResampleFilter(ResampleFilter *resample_filter, const FilterType filter) { ResizeFilter *resize_filter; assert(resample_filter != (ResampleFilter *) NULL); assert(resample_filter->signature == MagickCoreSignature); resample_filter->do_interpolate = MagickFalse; resample_filter->filter = filter; /* Default cylindrical filter is a Cubic Keys filter */ if ( filter == UndefinedFilter ) resample_filter->filter = RobidouxFilter; if ( resample_filter->filter == PointFilter ) { resample_filter->do_interpolate = MagickTrue; return; /* EWA turned off - nothing more to do */ } resize_filter = AcquireResizeFilter(resample_filter->image, resample_filter->filter,MagickTrue,resample_filter->exception); if (resize_filter == (ResizeFilter *) NULL) { (void) ThrowMagickException(resample_filter->exception,GetMagickModule(), ModuleError, "UnableToSetFilteringValue", "Fall back to Interpolated 'Point' filter"); resample_filter->filter = PointFilter; resample_filter->do_interpolate = MagickTrue; return; /* EWA turned off - nothing more to do */ } /* Get the practical working support for the filter, * after any API call blur factors have been accoded for. 
*/ #if EWA resample_filter->support = GetResizeFilterSupport(resize_filter); #else resample_filter->support = 2.0; /* fixed support size for HQ-EWA */ #endif #if FILTER_LUT /* Fill the LUT with the weights from the selected filter function */ { int Q; double r_scale; /* Scale radius so the filter LUT covers the full support range */ r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH); for(Q=0; Q<WLUT_WIDTH; Q++) resample_filter->filter_lut[Q] = (double) GetResizeFilterWeight(resize_filter,sqrt((double)Q)*r_scale); /* finished with the resize filter */ resize_filter = DestroyResizeFilter(resize_filter); } #else /* save the filter and the scaled ellipse bounds needed for filter */ resample_filter->filter_def = resize_filter; resample_filter->F = resample_filter->support*resample_filter->support; #endif /* Adjust the scaling of the default unit circle This assumes that any real scaling changes will always take place AFTER the filter method has been initialized. */ ScaleResampleFilter(resample_filter, 1.0, 0.0, 0.0, 1.0); #if 0 /* This is old code kept as a reference only. Basically it generates a Gaussian bell curve, with sigma = 0.5 if the support is 2.0 Create Normal Gaussian 2D Filter Weighted Lookup Table. A normal EWA guassual lookup would use exp(Q*ALPHA) where Q = distance squared from 0.0 (center) to 1.0 (edge) and ALPHA = -4.0*ln(2.0) ==> -2.77258872223978123767 The table is of length 1024, and equates to support radius of 2.0 thus needs to be scaled by ALPHA*4/1024 and any blur factor squared The it comes from reference code provided by Fred Weinhaus. */ r_scale = -2.77258872223978123767/(WLUT_WIDTH*blur*blur); for(Q=0; Q<WLUT_WIDTH; Q++) resample_filter->filter_lut[Q] = exp((double)Q*r_scale); resample_filter->support = WLUT_WIDTH; #endif #if FILTER_LUT #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp single #endif { if (IsStringTrue(GetImageArtifact(resample_filter->image, "resample:verbose")) != MagickFalse) { int Q; double r_scale; /* Debug output of the filter weighting LUT Gnuplot the LUT data, the x scale index has been adjusted plot [0:2][-.2:1] "lut.dat" with lines The filter values should be normalized for comparision */ printf("#\n"); printf("# Resampling Filter LUT (%d values) for '%s' filter\n", WLUT_WIDTH, CommandOptionToMnemonic(MagickFilterOptions, resample_filter->filter) ); printf("#\n"); printf("# Note: values in table are using a squared radius lookup.\n"); printf("# As such its distribution is not uniform.\n"); printf("#\n"); printf("# The X value is the support distance for the Y weight\n"); printf("# so you can use gnuplot to plot this cylindrical filter\n"); printf("# plot [0:2][-.2:1] \"lut.dat\" with lines\n"); printf("#\n"); /* Scale radius so the filter LUT covers the full support range */ r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH); for(Q=0; Q<WLUT_WIDTH; Q++) printf("%8.*g %.*g\n", GetMagickPrecision(),sqrt((double)Q)*r_scale, GetMagickPrecision(),resample_filter->filter_lut[Q] ); printf("\n\n"); /* generate a 'break' in gnuplot if multiple outputs */ } /* Output the above once only for each image, and each setting (void) DeleteImageArtifact(resample_filter->image,"resample:verbose"); */ } #endif /* FILTER_LUT */ return; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t R e s a m p l e F i l t e r I n t e r p o l a t e M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetResampleFilterInterpolateMethod() 
sets the resample filter interpolation % method. % % The format of the SetResampleFilterInterpolateMethod method is: % % MagickBooleanType SetResampleFilterInterpolateMethod( % ResampleFilter *resample_filter,const InterpolateMethod method) % % A description of each parameter follows: % % o resample_filter: the resample filter. % % o method: the interpolation method. % */ MagickExport MagickBooleanType SetResampleFilterInterpolateMethod( ResampleFilter *resample_filter,const PixelInterpolateMethod method) { assert(resample_filter != (ResampleFilter *) NULL); assert(resample_filter->signature == MagickCoreSignature); assert(resample_filter->image != (Image *) NULL); if (resample_filter->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", resample_filter->image->filename); resample_filter->interpolate=method; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t R e s a m p l e F i l t e r V i r t u a l P i x e l M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetResampleFilterVirtualPixelMethod() changes the virtual pixel method % associated with the specified resample filter. % % The format of the SetResampleFilterVirtualPixelMethod method is: % % MagickBooleanType SetResampleFilterVirtualPixelMethod( % ResampleFilter *resample_filter,const VirtualPixelMethod method) % % A description of each parameter follows: % % o resample_filter: the resample filter. % % o method: the virtual pixel method. % */ MagickExport MagickBooleanType SetResampleFilterVirtualPixelMethod( ResampleFilter *resample_filter,const VirtualPixelMethod method) { assert(resample_filter != (ResampleFilter *) NULL); assert(resample_filter->signature == MagickCoreSignature); assert(resample_filter->image != (Image *) NULL); if (resample_filter->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", resample_filter->image->filename); resample_filter->virtual_pixel=method; if (method != UndefinedVirtualPixelMethod) (void) SetCacheViewVirtualPixelMethod(resample_filter->view,method); return(MagickTrue); }
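/*
 * Usage sketch (added for illustration; it restates the example given in
 * the AcquireResampleFilter() documentation above and is not part of
 * resample.c). Error handling is elided, `image' and `exception' are
 * assumed to exist, and the identity scaling vectors stand in for the
 * derivatives of a real distortion.
 */
#if 0
static void ResampleWholeImageExample(Image *image,ExceptionInfo *exception)
{
  PixelInfo
    pixel;

  ResampleFilter
    *resample_filter;

  ssize_t
    x,
    y;

  resample_filter=AcquireResampleFilter(image,exception);
  SetResampleFilter(resample_filter,GaussianFilter);
  for (y=0; y < (ssize_t) image->rows; y++)
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* scaling vectors would normally come from the derivatives of the
         distortion being applied; identity scaling is shown here */
      ScaleResampleFilter(resample_filter,1.0,0.0,0.0,1.0);
      (void) ResamplePixelColor(resample_filter,(double) x,(double) y,
        &pixel,exception);
      /* ... assign the resampled pixel value ... */
    }
  resample_filter=DestroyResampleFilter(resample_filter);
}
#endif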
THTensorMath.c
#ifndef TH_GENERIC_FILE #define TH_GENERIC_FILE "generic/THTensorMath.c" #else #define TH_OMP_OVERHEAD_THRESHOLD 100000 void THTensor_(fill)(THTensor *r_, real value) { TH_TENSOR_APPLY(real, r_, THVector_(fill)(r__data, value, r__size); break;); } void THTensor_(zero)(THTensor *r_) { TH_TENSOR_APPLY(real, r_, THVector_(fill)(r__data, 0, r__size); break;); } void THTensor_(maskedFill)(THTensor *tensor, THByteTensor *mask, real value) { TH_TENSOR_APPLY2(real, tensor, unsigned char, mask, if (*mask_data > 1) { THFree(mask_counter); THFree(tensor_counter); THError("Mask tensor can take 0 and 1 values only"); } else if (*mask_data == 1) { *tensor_data = value; }); } void THTensor_(maskedCopy)(THTensor *tensor, THByteTensor *mask, THTensor* src ) { THTensor *srct = THTensor_(newContiguous)(src); real *src_data = THTensor_(data)(srct); long cntr = 0; long nelem = THTensor_(nElement)(srct); if (THTensor_(nElement)(tensor) != THByteTensor_nElement(mask)) { THTensor_(free)(srct); THError("Number of elements of destination tensor != Number of elements in mask"); } TH_TENSOR_APPLY2(real, tensor, unsigned char, mask, if (*mask_data > 1) { THTensor_(free)(srct); THFree(mask_counter); THFree(tensor_counter); THError("Mask tensor can take 0 and 1 values only"); } else if (*mask_data == 1) { if (cntr == nelem) { THTensor_(free)(srct); THFree(mask_counter); THFree(tensor_counter); THError("Number of elements of src < number of ones in mask"); } *tensor_data = *src_data; src_data++; cntr++; }); THTensor_(free)(srct); } void THTensor_(maskedSelect)(THTensor *tensor, THTensor *src, THByteTensor *mask) { long numel = THByteTensor_sumall(mask); real *tensor_data; THTensor_(resize1d)(tensor,numel); tensor_data = THTensor_(data)(tensor); TH_TENSOR_APPLY2(real, src, unsigned char, mask, if (*mask_data > 1) { THFree(mask_counter); THFree(src_counter); THError("Mask tensor can take 0 and 1 values only"); } else if (*mask_data == 1) { *tensor_data = *src_data; tensor_data++; }); } // Finds non-zero elements of a tensor and returns their subscripts void THTensor_(nonzero)(THLongTensor *subscript, THTensor *tensor) { long numel = 0; long *subscript_data; long i = 0; long dim; long div = 1; /* First Pass to determine size of subscripts */ TH_TENSOR_APPLY(real, tensor, if (*tensor_data != 0) { ++numel; }); THLongTensor_resize2d(subscript, numel, tensor->nDimension); /* Second pass populates subscripts */ subscript_data = THLongTensor_data(subscript); TH_TENSOR_APPLY(real, tensor, if (*tensor_data != 0) { div = 1; for (dim = tensor->nDimension - 1; dim >= 0; dim--) { *(subscript_data + dim) = (i/div) % tensor->size[dim]; div *= tensor->size[dim]; } subscript_data += tensor->nDimension; } ++i;); } void THTensor_(indexSelect)(THTensor *tensor, THTensor *src, int dim, THLongTensor *index) { long i, numel; THLongStorage *newSize; THTensor *tSlice, *sSlice; long *index_data; real *tensor_data, *src_data; THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector"); THArgCheck(dim < src->nDimension, 4,"Indexing dim %d is out of bounds of tensor", dim+1); THArgCheck(src->nDimension > 0,2,"Source tensor is empty"); numel = THLongTensor_nElement(index); newSize = THLongStorage_newWithSize(src->nDimension); THLongStorage_rawCopy(newSize,src->size); newSize->data[dim] = numel; THTensor_(resize)(tensor,newSize,NULL); THLongStorage_free(newSize); index = THLongTensor_newContiguous(index); index_data = THLongTensor_data(index); if (dim == 0 && THTensor_(isContiguous)(src) && THTensor_(isContiguous)(tensor)) { tensor_data = 
THTensor_(data)(tensor); src_data = THTensor_(data)(src); long rowsize = THTensor_(nElement)(src) / src->size[0]; // check that the indices are within range long max = src->size[0]; for (i=0; i<numel; i++) { if (index_data[i] < 1 || index_data[i] > max) { THLongTensor_free(index); THError("index out of range"); } } if (src->nDimension == 1) { #pragma omp parallel for if(numel > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<numel; i++) tensor_data[i] = src_data[index_data[i]-1]; } else { #pragma omp parallel for if(numel*rowsize > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<numel; i++) memcpy(tensor_data + i*rowsize, src_data + (index_data[i]-1)*rowsize, rowsize*sizeof(real)); } } else if (src->nDimension == 1) { for (i=0; i<numel; i++) THTensor_(set1d)(tensor,i,THTensor_(get1d)(src,index_data[i]-1)); } else { for (i=0; i<numel; i++) { tSlice = THTensor_(new)(); sSlice = THTensor_(new)(); THTensor_(select)(tSlice, tensor, dim, i); THTensor_(select)(sSlice, src, dim, index_data[i]-1); THTensor_(copy)(tSlice, sSlice); THTensor_(free)(tSlice); THTensor_(free)(sSlice); } } THLongTensor_free(index); } void THTensor_(indexCopy)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src) { long i, numel; THTensor *tSlice, *sSlice; long *index_data; numel = THLongTensor_nElement(index); THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector"); THArgCheck(dim < src->nDimension, 4,"Indexing dim %d is out of bounds of tensor", dim+1); THArgCheck(numel == src->size[dim],4,"Number of indices should be equal to source:size(dim)"); index = THLongTensor_newContiguous(index); index_data = THLongTensor_data(index); if (tensor->nDimension > 1 ) { tSlice = THTensor_(new)(); sSlice = THTensor_(new)(); for (i=0; i<numel; i++) { THTensor_(select)(tSlice, tensor, dim, index_data[i]-1); THTensor_(select)(sSlice, src, dim, i); THTensor_(copy)(tSlice, sSlice); } THTensor_(free)(tSlice); THTensor_(free)(sSlice); } else { for (i=0; i<numel; i++) { THTensor_(set1d)(tensor,index_data[i]-1,THTensor_(get1d)(src,i)); } } THLongTensor_free(index); } void THTensor_(indexAdd)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src) { long i, numel; THTensor *tSlice, *sSlice; long *index_data; numel = THLongTensor_nElement(index); THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector"); THArgCheck(dim < src->nDimension, 4,"Indexing dim %d is out of bounds of tensor", dim+1); THArgCheck(numel == src->size[dim],4,"Number of indices should be equal to source:size(dim)"); index = THLongTensor_newContiguous(index); index_data = THLongTensor_data(index); if (tensor->nDimension > 1 ) { tSlice = THTensor_(new)(); sSlice = THTensor_(new)(); for (i=0; i<numel; i++) { THTensor_(select)(tSlice, tensor, dim, index_data[i]-1); THTensor_(select)(sSlice, src, dim, i); THTensor_(cadd)(tSlice, tSlice, 1.0, sSlice); } THTensor_(free)(tSlice); THTensor_(free)(sSlice); } else { for (i=0; i<numel; i++) { THTensor_(set1d)(tensor,index_data[i]-1,THTensor_(get1d)(src,i) + THTensor_(get1d)(tensor,index_data[i]-1)); } } THLongTensor_free(index); } void THTensor_(indexFill)(THTensor *tensor, int dim, THLongTensor *index, real val) { long i, numel; THTensor *tSlice; long *index_data; numel = THLongTensor_nElement(index); THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector"); THArgCheck(dim < tensor->nDimension, 4,"Indexing dim %d is out of bounds of tensor", dim+1); index = THLongTensor_newContiguous(index); index_data = THLongTensor_data(index); for (i=0; i<numel; i++) { if 
(tensor->nDimension > 1 ) { tSlice = THTensor_(new)(); THTensor_(select)(tSlice, tensor,dim,index_data[i]-1); THTensor_(fill)(tSlice, val); THTensor_(free)(tSlice); } else { THTensor_(set1d)(tensor,index_data[i]-1,val); } } THLongTensor_free(index); } void THTensor_(gather)(THTensor *tensor, THTensor *src, int dim, THLongTensor *index) { long elems_per_row, i, idx; THArgCheck(THTensor_(nDimension)(src) == THTensor_(nDimension)(tensor), 2, "Input tensor must have same dimensions as output tensor"); THArgCheck(dim < THTensor_(nDimension)(tensor), 3, "Index dimension is out of bounds"); THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(src), 4, "Index tensor must have same dimensions as input tensor"); elems_per_row = THLongTensor_size(index, dim); TH_TENSOR_DIM_APPLY3(real, tensor, real, src, long, index, dim, for (i = 0; i < elems_per_row; ++i) { idx = *(index_data + i*index_stride); if (idx < 1 || idx > src_size) { THFree(TH_TENSOR_DIM_APPLY_counter); THError("Invalid index in gather"); } *(tensor_data + i*tensor_stride) = src_data[(idx - 1) * src_stride]; }) } void THTensor_(scatter)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src) { long elems_per_row, i, idx; THArgCheck(dim < THTensor_(nDimension)(tensor), 2, "Index dimension is out of bounds"); THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(tensor), 3, "Index tensor must have same dimensions as output tensor"); THArgCheck(THTensor_(nDimension)(src) == THTensor_(nDimension)(tensor), 4, "Input tensor must have same dimensions as output tensor"); elems_per_row = THLongTensor_size(index, dim); TH_TENSOR_DIM_APPLY3(real, tensor, real, src, long, index, dim, for (i = 0; i < elems_per_row; ++i) { idx = *(index_data + i*index_stride); if (idx < 1 || idx > tensor_size) { THFree(TH_TENSOR_DIM_APPLY_counter); THError("Invalid index in scatter"); } tensor_data[(idx - 1) * tensor_stride] = *(src_data + i*src_stride); }) } void THTensor_(scatterFill)(THTensor *tensor, int dim, THLongTensor *index, real val) { long elems_per_row, i, idx; THArgCheck(dim < THTensor_(nDimension)(tensor), 2, "Index dimension is out of bounds"); THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(tensor), 3, "Index tensor must have same dimensions as output tensor"); elems_per_row = THLongTensor_size(index, dim); TH_TENSOR_DIM_APPLY2(real, tensor, long, index, dim, for (i = 0; i < elems_per_row; ++i) { idx = *(index_data + i*index_stride); if (idx < 1 || idx > tensor_size) { THFree(TH_TENSOR_DIM_APPLY_counter); THError("Invalid index in scatter"); } tensor_data[(idx - 1) * tensor_stride] = val; }) } accreal THTensor_(dot)(THTensor *tensor, THTensor *src) { accreal sum = 0; /* we use a trick here. careful with that. */ TH_TENSOR_APPLY2(real, tensor, real, src, long sz = (tensor_size-tensor_i < src_size-src_i ? 
tensor_size-tensor_i : src_size-src_i);
                  sum += THBlas_(dot)(sz, src_data, src_stride, tensor_data, tensor_stride);
                  tensor_i += sz;
                  src_i += sz;
                  tensor_data += sz*tensor_stride;
                  src_data += sz*src_stride;
                  break;);
  return sum;
}

#undef th_isnan
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
#define th_isnan(val) \
if (isnan(val)) break;
#else
#define th_isnan(val)
#endif

real THTensor_(minall)(THTensor *tensor)
{
  real theMin;
  real value;

  THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension");
  theMin = THTensor_(data)(tensor)[0];
  TH_TENSOR_APPLY(real, tensor,
                  value = *tensor_data;
                  /* This is not the same as value<theMin in the case of NaNs */
                  if(!(value >= theMin))
                  {
                    theMin = value;
                    th_isnan(value)
                  });
  return theMin;
}

real THTensor_(maxall)(THTensor *tensor)
{
  real theMax;
  real value;

  THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension");
  theMax = THTensor_(data)(tensor)[0];
  TH_TENSOR_APPLY(real, tensor,
                  value = *tensor_data;
                  /* This is not the same as value>theMax in the case of NaNs */
                  if(!(value <= theMax))
                  {
                    theMax = value;
                    th_isnan(value)
                  });
  return theMax;
}

accreal THTensor_(sumall)(THTensor *tensor)
{
  accreal sum = 0;
  TH_TENSOR_APPLY(real, tensor, sum += *tensor_data;);
  return sum;
}

accreal THTensor_(prodall)(THTensor *tensor)
{
  accreal prod = 1;
  TH_TENSOR_APPLY(real, tensor, prod *= *tensor_data;);
  return prod;
}

void THTensor_(add)(THTensor *r_, THTensor *t, real value)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    long sz = THTensor_(nElement)(t);
    long i;
    #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i=0; i<sz; i++)
      rp[i] = tp[i] + value;
  } else {
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data + value;);
  }
}

void THTensor_(sub)(THTensor *r_, THTensor *t, real value)
{
  THTensor_(add)(r_, t, -value);
}

void THTensor_(mul)(THTensor *r_, THTensor *t, real value)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    long sz = THTensor_(nElement)(t);
    long i;
    #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i=0; i<sz; i++)
      rp[i] = tp[i] * value;
  } else {
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data * value;);
  }
}

void THTensor_(div)(THTensor *r_, THTensor *t, real value)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    long sz = THTensor_(nElement)(t);
    long i;
    #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i=0; i<sz; i++)
      rp[i] = tp[i] / value;
  } else {
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data / value;);
  }
}

void THTensor_(fmod)(THTensor *r_, THTensor *t, real value)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    long sz = THTensor_(nElement)(t);
    long i;
    #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i=0; i<sz; i++)
      rp[i] = fmod(tp[i], value);
  } else {
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = fmod(*t_data, value););
  }
}

void
THTensor_(remainder)(THTensor *r_, THTensor *t, real value) { THTensor_(resizeAs)(r_, t); if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) { real *tp = THTensor_(data)(t); real *rp = THTensor_(data)(r_); long sz = THTensor_(nElement)(t); long i; #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<sz; i++) rp[i] = (value == 0)? NAN : tp[i] - value * floor(tp[i] / value); } else { TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (value == 0)? NAN : *t_data - value * floor(*t_data / value);); } } void THTensor_(clamp)(THTensor *r_, THTensor *t, real min_value, real max_value) { THTensor_(resizeAs)(r_, t); if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) { real *tp = THTensor_(data)(t); real *rp = THTensor_(data)(r_); real t_val; long sz = THTensor_(nElement)(t); long i; #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<sz; i++) rp[i] = (tp[i] < min_value) ? min_value : (tp[i] > max_value ? max_value : tp[i]); } else { TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (*t_data < min_value) ? min_value : (*t_data > max_value ? max_value : *t_data);); } } void THTensor_(cadd)(THTensor *r_, THTensor *t, real value, THTensor *src) { THTensor_(resizeAs)(r_, t); if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) { if(r_ == t) { THBlas_(axpy)(THTensor_(nElement)(t), value, THTensor_(data)(src), 1, THTensor_(data)(r_), 1); } else { real *tp = THTensor_(data)(t); real *sp = THTensor_(data)(src); real *rp = THTensor_(data)(r_); long sz = THTensor_(nElement)(t); long i; #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i< sz; i++) rp[i] = tp[i] + value * sp[i]; } } else { TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data + value * *src_data;); } } void THTensor_(csub)(THTensor *r_, THTensor *t, real value,THTensor *src) { THTensor_(cadd)(r_, t, -value, src); } void THTensor_(cmul)(THTensor *r_, THTensor *t, THTensor *src) { THTensor_(resizeAs)(r_, t); if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) { real *tp = THTensor_(data)(t); real *sp = THTensor_(data)(src); real *rp = THTensor_(data)(r_); long sz = THTensor_(nElement)(t); long i; #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<sz; i++) rp[i] = tp[i] * sp[i]; } else { TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data * *src_data;); } } void THTensor_(cpow)(THTensor *r_, THTensor *t, THTensor *src) { THTensor_(resizeAs)(r_, t); if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) { real *tp = THTensor_(data)(t); real *sp = THTensor_(data)(src); real *rp = THTensor_(data)(r_); long sz = THTensor_(nElement)(t); long i; #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<sz; i++) rp[i] = pow(tp[i], sp[i]); } else { TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = pow(*t_data, *src_data);); } } void THTensor_(cdiv)(THTensor *r_, THTensor *t, THTensor *src) { THTensor_(resizeAs)(r_, t); if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) { 
real *tp = THTensor_(data)(t); real *sp = THTensor_(data)(src); real *rp = THTensor_(data)(r_); long sz = THTensor_(nElement)(t); long i; #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<sz; i++) rp[i] = tp[i] / sp[i]; } else { TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data / *src_data;); } } void THTensor_(cfmod)(THTensor *r_, THTensor *t, THTensor *src) { THTensor_(resizeAs)(r_, t); if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) { real *tp = THTensor_(data)(t); real *sp = THTensor_(data)(src); real *rp = THTensor_(data)(r_); long sz = THTensor_(nElement)(t); long i; #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<sz; i++) rp[i] = fmod(tp[i], sp[i]); } else { TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = fmod(*t_data, *src_data);); } } void THTensor_(cremainder)(THTensor *r_, THTensor *t, THTensor *src) { THTensor_(resizeAs)(r_, t); if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) { real *tp = THTensor_(data)(t); real *sp = THTensor_(data)(src); real *rp = THTensor_(data)(r_); long sz = THTensor_(nElement)(t); long i; #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<sz; i++) rp[i] = (sp[i] == 0)? NAN : tp[i] - sp[i] * floor(tp[i] / sp[i]); } else { TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = (*src_data == 0)? NAN : *t_data - *src_data * floor(*t_data / *src_data);); } } void THTensor_(tpow)(THTensor *r_, real value, THTensor *t) { THTensor_(resizeAs)(r_, t); if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) { real *tp = THTensor_(data)(t); real *rp = THTensor_(data)(r_); long sz = THTensor_(nElement)(t); long i; #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<sz; i++) rp[i] = pow(value, tp[i]); } else { TH_TENSOR_APPLY2(real, r_, real, t, *r__data = pow(value, *t_data);); } } void THTensor_(addcmul)(THTensor *r_, THTensor *t, real value, THTensor *src1, THTensor *src2) { if(r_ != t) { THTensor_(resizeAs)(r_, t); THTensor_(copy)(r_, t); } TH_TENSOR_APPLY3(real, r_, real, src1, real, src2, *r__data += value * *src1_data * *src2_data;); } void THTensor_(addcdiv)(THTensor *r_, THTensor *t, real value, THTensor *src1, THTensor *src2) { if(r_ != t) { THTensor_(resizeAs)(r_, t); THTensor_(copy)(r_, t); } TH_TENSOR_APPLY3(real, r_, real, src1, real, src2, *r__data += value * *src1_data / *src2_data;); } void THTensor_(addmv)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *mat, THTensor *vec) { if( (mat->nDimension != 2) || (vec->nDimension != 1) ) THError("matrix and vector expected, got %dD, %dD", mat->nDimension, vec->nDimension); if( mat->size[1] != vec->size[0] ) { THDescBuff bm = THTensor_(sizeDesc)(mat); THDescBuff bv = THTensor_(sizeDesc)(vec); THError("size mismatch, %s, %s", bm.str, bv.str); } if(t->nDimension != 1) THError("vector expected, got t: %dD", t->nDimension); if(t->size[0] != mat->size[0]) { THDescBuff bt = THTensor_(sizeDesc)(t); THDescBuff bm = THTensor_(sizeDesc)(mat); THError("size mismatch, t: %s, mat: %s", bt.str, bm.str); } if(r_ != t) { THTensor_(resizeAs)(r_, t); THTensor_(copy)(r_, t); } if(mat->stride[0] == 1) { THBlas_(gemv)('n', mat->size[0], mat->size[1], alpha, THTensor_(data)(mat), mat->stride[1], 
THTensor_(data)(vec), vec->stride[0], beta, THTensor_(data)(r_), r_->stride[0]); } else if(mat->stride[1] == 1) { THBlas_(gemv)('t', mat->size[1], mat->size[0], alpha, THTensor_(data)(mat), mat->stride[0], THTensor_(data)(vec), vec->stride[0], beta, THTensor_(data)(r_), r_->stride[0]); } else { THTensor *cmat = THTensor_(newContiguous)(mat); THBlas_(gemv)('t', mat->size[1], mat->size[0], alpha, THTensor_(data)(cmat), cmat->stride[0], THTensor_(data)(vec), vec->stride[0], beta, THTensor_(data)(r_), r_->stride[0]); THTensor_(free)(cmat); } } void THTensor_(match)(THTensor *r_, THTensor *m1, THTensor *m2, real gain) { long N1 = m1->size[0]; long N2 = m2->size[0]; long dim; real *m1_p; real *m2_p; real *r_p; long i; THTensor_(resize2d)(r_, N1, N2); m1 = THTensor_(newContiguous)(m1); m2 = THTensor_(newContiguous)(m2); THTensor_(resize2d)(m1, N1, THTensor_(nElement)(m1) / N1); THTensor_(resize2d)(m2, N2, THTensor_(nElement)(m2) / N2); dim = m1->size[1]; THArgCheck(m1->size[1] == m2->size[1], 3, "m1 and m2 must have the same inner vector dim"); m1_p = THTensor_(data)(m1); m2_p = THTensor_(data)(m2); r_p = THTensor_(data)(r_); #pragma omp parallel for private(i) for (i=0; i<N1; i++) { long j,k; for (j=0; j<N2; j++) { real sum = 0; for (k=0; k<dim; k++) { real term = m1_p[ i*dim + k ] - m2_p[ j*dim + k ]; sum += term*term; } r_p[ i*N2 + j ] = gain * sum; } } THTensor_(free)(m1); THTensor_(free)(m2); } void THTensor_(addmm)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *m1, THTensor *m2) { char transpose_r, transpose_m1, transpose_m2; THTensor *r__, *m1_, *m2_; if( (m1->nDimension != 2) || (m2->nDimension != 2)) THError("matrices expected, got %dD, %dD tensors", m1->nDimension, m2->nDimension); if(m1->size[1] != m2->size[0]) { THDescBuff bm1 = THTensor_(sizeDesc)(m1); THDescBuff bm2 = THTensor_(sizeDesc)(m2); THError("size mismatch, m1: %s, m2: %s", bm1.str, bm2.str); } if( t->nDimension != 2 ) THError("matrix expected, got %dD tensor for t", t->nDimension); if( (t->size[0] != m1->size[0]) || (t->size[1] != m2->size[1]) ) { THDescBuff bt = THTensor_(sizeDesc)(t); THDescBuff bm1 = THTensor_(sizeDesc)(m1); THDescBuff bm2 = THTensor_(sizeDesc)(m2); THError("size mismatch, t: %s, m1: %s, m2: %s", bt.str, bm1.str, bm2.str); } if(t != r_) { THTensor_(resizeAs)(r_, t); THTensor_(copy)(r_, t); } /* printf("%ldx%ld = %ldx%ld X %ldx%ld\n", r_->size[0], r_->size[1], m1->size[0], m1->size[1], m2->size[0], m2->size[1]); */ /* r_ */ if(r_->stride[0] == 1 && r_->stride[1] != 0) { transpose_r = 'n'; r__ = r_; } else if(r_->stride[1] == 1 && r_->stride[0] != 0) { THTensor *swap = m2; m2 = m1; m1 = swap; transpose_r = 't'; r__ = r_; } else { transpose_r = 'n'; THTensor *transp_r_ = THTensor_(newTranspose)(r_, 0, 1); r__ = THTensor_(newClone)(transp_r_); THTensor_(free)(transp_r_); THTensor_(transpose)(r__, NULL, 0, 1); } /* m1 */ if(m1->stride[(transpose_r == 'n' ? 0 : 1)] == 1 && m1->stride[(transpose_r == 'n' ? 1 : 0)] != 0) { transpose_m1 = 'n'; m1_ = m1; } else if(m1->stride[(transpose_r == 'n' ? 1 : 0)] == 1 && m1->stride[(transpose_r == 'n' ? 0 : 1)] != 0) { transpose_m1 = 't'; m1_ = m1; } else { transpose_m1 = (transpose_r == 'n' ? 't' : 'n'); m1_ = THTensor_(newContiguous)(m1); } /* m2 */ if(m2->stride[(transpose_r == 'n' ? 0 : 1)] == 1 && m2->stride[(transpose_r == 'n' ? 1 : 0)] != 0) { transpose_m2 = 'n'; m2_ = m2; } else if(m2->stride[(transpose_r == 'n' ? 1 : 0)] == 1 && m2->stride[(transpose_r == 'n' ? 
0 : 1)] != 0) { transpose_m2 = 't'; m2_ = m2; } else { transpose_m2 = (transpose_r == 'n' ? 't' : 'n'); m2_ = THTensor_(newContiguous)(m2); } /* do the operation */ THBlas_(gemm)(transpose_m1, transpose_m2, r__->size[(transpose_r == 'n' ? 0 : 1)], r__->size[(transpose_r == 'n' ? 1 : 0)], m1_->size[(transpose_r == 'n' ? 1 : 0)], alpha, THTensor_(data)(m1_), (transpose_m1 == 'n' ? m1_->stride[(transpose_r == 'n' ? 1 : 0)] : m1_->stride[(transpose_r == 'n' ? 0 : 1)]), THTensor_(data)(m2_), (transpose_m2 == 'n' ? m2_->stride[(transpose_r == 'n' ? 1 : 0)] : m2_->stride[(transpose_r == 'n' ? 0 : 1)]), beta, THTensor_(data)(r__), r__->stride[(transpose_r == 'n' ? 1 : 0)]); /* free intermediate variables */ if(m1_ != m1) THTensor_(free)(m1_); if(m2_ != m2) THTensor_(free)(m2_); if(r__ != r_) THTensor_(freeCopyTo)(r__, r_); } void THTensor_(addr)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *vec1, THTensor *vec2) { if( (vec1->nDimension != 1) || (vec2->nDimension != 1) ) THError("vector and vector expected, got %dD, %dD tensors", vec1->nDimension, vec2->nDimension); if(t->nDimension != 2) THError("expected matrix, got %dD tensor for t", t->nDimension); if( (t->size[0] != vec1->size[0]) || (t->size[1] != vec2->size[0]) ) { THDescBuff bt = THTensor_(sizeDesc)(t); THDescBuff bv1 = THTensor_(sizeDesc)(vec1); THDescBuff bv2 = THTensor_(sizeDesc)(vec2); THError("size mismatch, t: %s, vec1: %s, vec2: %s", bt.str, bv1.str, bv2.str); } if(r_ != t) { THTensor_(resizeAs)(r_, t); THTensor_(copy)(r_, t); } if(beta != 1) THTensor_(mul)(r_, r_, beta); if(r_->stride[0] == 1) { THBlas_(ger)(vec1->size[0], vec2->size[0], alpha, THTensor_(data)(vec1), vec1->stride[0], THTensor_(data)(vec2), vec2->stride[0], THTensor_(data)(r_), r_->stride[1]); } else if(r_->stride[1] == 1) { THBlas_(ger)(vec2->size[0], vec1->size[0], alpha, THTensor_(data)(vec2), vec2->stride[0], THTensor_(data)(vec1), vec1->stride[0], THTensor_(data)(r_), r_->stride[0]); } else { THTensor *cr = THTensor_(newClone)(r_); THBlas_(ger)(vec2->size[0], vec1->size[0], alpha, THTensor_(data)(vec2), vec2->stride[0], THTensor_(data)(vec1), vec1->stride[0], THTensor_(data)(cr), cr->stride[0]); THTensor_(freeCopyTo)(cr, r_); } } void THTensor_(addbmm)(THTensor *result, real beta, THTensor *t, real alpha, THTensor *batch1, THTensor *batch2) { long batch; THArgCheck(THTensor_(nDimension)(batch1) == 3, 1, "expected 3D tensor"); THArgCheck(THTensor_(nDimension)(batch2) == 3, 2, "expected 3D tensor"); THArgCheck(THTensor_(size)(batch1, 0) == THTensor_(size)(batch2, 0), 2, "equal number of batches expected, got %d, %d", THTensor_(size)(batch1, 0), THTensor_(size)(batch2, 0)); THArgCheck(THTensor_(size)(batch1, 2) == THTensor_(size)(batch2, 1), 2, "wrong matrix size, batch1: %dx%d, batch2: %dx%d", THTensor_(size)(batch1, 1), THTensor_(size)(batch1,2), THTensor_(size)(batch2, 1), THTensor_(size)(batch2,2)); long dim1 = THTensor_(size)(batch1, 1); long dim2 = THTensor_(size)(batch2, 2); THArgCheck(THTensor_(size)(t, 0) == dim1, 1, "output tensor of incorrect size"); THArgCheck(THTensor_(size)(t, 1) == dim2, 1, "output tensor of incorrect size"); if (t != result) { THTensor_(resizeAs)(result, t); THTensor_(copy)(result, t); } THTensor *matrix1 = THTensor_(new)(); THTensor *matrix2 = THTensor_(new)(); for (batch = 0; batch < THTensor_(size)(batch1, 0); ++batch) { THTensor_(select)(matrix1, batch1, 0, batch); THTensor_(select)(matrix2, batch2, 0, batch); THTensor_(addmm)(result, beta, result, alpha, matrix1, matrix2); beta = 1; // accumulate output once } 
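/* Note on the loop above: after the first iteration, beta is forced to 1 so
   that every remaining batch product accumulates into `result`; otherwise the
   original `t` contribution would be rescaled by beta on each pass. */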
THTensor_(free)(matrix1); THTensor_(free)(matrix2); } void THTensor_(baddbmm)(THTensor *result, real beta, THTensor *t, real alpha, THTensor *batch1, THTensor *batch2) { long batch; THArgCheck(THTensor_(nDimension)(batch1) == 3, 1, "expected 3D tensor, got %dD", THTensor_(nDimension)(batch1)); THArgCheck(THTensor_(nDimension)(batch2) == 3, 2, "expected 3D tensor, got %dD", THTensor_(nDimension)(batch2)); THArgCheck(THTensor_(size)(batch1, 0) == THTensor_(size)(batch2, 0), 2, "equal number of batches expected, got %d, %d", THTensor_(size)(batch1, 0), THTensor_(size)(batch2, 0)); THArgCheck(THTensor_(size)(batch1, 2) == THTensor_(size)(batch2, 1), 2, "wrong matrix size, batch1: %dx%d, batch2: %dx%d", THTensor_(size)(batch1, 1), THTensor_(size)(batch1, 2), THTensor_(size)(batch2, 1), THTensor_(size)(batch2, 2)); long bs = THTensor_(size)(batch1, 0); long dim1 = THTensor_(size)(batch1, 1); long dim2 = THTensor_(size)(batch2, 2); THArgCheck(THTensor_(size)(t, 0) == bs, 1, "output tensor of incorrect size"); THArgCheck(THTensor_(size)(t, 1) == dim1, 1, "output tensor of incorrect size"); THArgCheck(THTensor_(size)(t, 2) == dim2, 1, "output tensor of incorrect size"); if (t != result) { THTensor_(resizeAs)(result, t); THTensor_(copy)(result, t); } THTensor *matrix1 = THTensor_(new)(); THTensor *matrix2 = THTensor_(new)(); THTensor *result_matrix = THTensor_(new)(); for (batch = 0; batch < THTensor_(size)(batch1, 0); ++batch) { THTensor_(select)(matrix1, batch1, 0, batch); THTensor_(select)(matrix2, batch2, 0, batch); THTensor_(select)(result_matrix, result, 0, batch); THTensor_(addmm)(result_matrix, beta, result_matrix, alpha, matrix1, matrix2); } THTensor_(free)(matrix1); THTensor_(free)(matrix2); THTensor_(free)(result_matrix); } long THTensor_(numel)(THTensor *t) { return THTensor_(nElement)(t); } void THTensor_(max)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension) { THLongStorage *dim; real theMax; real value; long theIndex; long i; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range", dimension+1); dim = THTensor_(newSizeOf)(t); THLongStorage_set(dim, dimension, 1); THTensor_(resize)(values_, dim, NULL); THLongTensor_resize(indices_, dim, NULL); THLongStorage_free(dim); TH_TENSOR_DIM_APPLY3(real, t, real, values_, long, indices_, dimension, theMax = t_data[0]; theIndex = 0; for(i = 0; i < t_size; i++) { value = t_data[i*t_stride]; /* This is not the same as value>theMax in the case of NaNs */ if(!(value <= theMax)) { theIndex = i; theMax = value; th_isnan(value) } } *indices__data = theIndex; *values__data = theMax;); } void THTensor_(min)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension) { THLongStorage *dim; real theMin; real value; long theIndex; long i; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range", dimension+1); dim = THTensor_(newSizeOf)(t); THLongStorage_set(dim, dimension, 1); THTensor_(resize)(values_, dim, NULL); THLongTensor_resize(indices_, dim, NULL); THLongStorage_free(dim); TH_TENSOR_DIM_APPLY3(real, t, real, values_, long, indices_, dimension, theMin = t_data[0]; theIndex = 0; for(i = 0; i < t_size; i++) { value = t_data[i*t_stride]; /* This is not the same as value<theMin in the case of NaNs */ if(!(value >= theMin)) { theIndex = i; theMin = value; th_isnan(value) } } *indices__data = theIndex; *values__data = theMin;); } void THTensor_(sum)(THTensor *r_, THTensor *t, int dimension) { THLongStorage *dim; THArgCheck(dimension >= 0 && 
dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range", dimension+1); dim = THTensor_(newSizeOf)(t); THLongStorage_set(dim, dimension, 1); THTensor_(resize)(r_, dim, NULL); THLongStorage_free(dim); TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, accreal sum = 0; long i; for(i = 0; i < t_size; i++) sum += t_data[i*t_stride]; *r__data = (real)sum;); } void THTensor_(prod)(THTensor *r_, THTensor *t, int dimension) { THLongStorage *dim; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range", dimension+1); dim = THTensor_(newSizeOf)(t); THLongStorage_set(dim, dimension, 1); THTensor_(resize)(r_, dim, NULL); THLongStorage_free(dim); TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, accreal prod = 1; long i; for(i = 0; i < t_size; i++) prod *= t_data[i*t_stride]; *r__data = (real)prod;); } void THTensor_(cumsum)(THTensor *r_, THTensor *t, int dimension) { THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range", dimension+1); THTensor_(resizeAs)(r_, t); TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, accreal cumsum = 0; long i; for(i = 0; i < t_size; i++) { cumsum += t_data[i*t_stride]; r__data[i*r__stride] = (real)cumsum; }); } void THTensor_(cumprod)(THTensor *r_, THTensor *t, int dimension) { THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range", dimension+1); THTensor_(resizeAs)(r_, t); TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, accreal cumprod = 1; long i; for(i = 0; i < t_size; i++) { cumprod *= t_data[i*t_stride]; r__data[i*r__stride] = (real)cumprod; }); } void THTensor_(sign)(THTensor *r_, THTensor *t) { THTensor_(resizeAs)(r_, t); #if defined (TH_REAL_IS_BYTE) TH_TENSOR_APPLY2(real, r_, real, t, if (*t_data > 0) *r__data = 1; else *r__data = 0;); #else TH_TENSOR_APPLY2(real, r_, real, t, if (*t_data > 0) *r__data = 1; else if (*t_data < 0) *r__data = -1; else *r__data = 0;); #endif } accreal THTensor_(trace)(THTensor *t) { real *t_data = THTensor_(data)(t); accreal sum = 0; long i = 0; long t_stride_0, t_stride_1, t_diag_size; THArgCheck(THTensor_(nDimension)(t) == 2, 1, "expected a matrix"); t_stride_0 = THTensor_(stride)(t, 0); t_stride_1 = THTensor_(stride)(t, 1); t_diag_size = THMin(THTensor_(size)(t, 0), THTensor_(size)(t, 1)); while(i < t_diag_size) { sum += t_data[i*(t_stride_0+t_stride_1)]; i++; } return sum; } void THTensor_(cross)(THTensor *r_, THTensor *a, THTensor *b, int dimension) { int i; if(THTensor_(nDimension)(a) != THTensor_(nDimension)(b)) THError("inconsistent tensor dimension %dD, %dD", THTensor_(nDimension)(a), THTensor_(nDimension)(b)); for(i = 0; i < THTensor_(nDimension)(a); i++) { if(THTensor_(size)(a, i) != THTensor_(size)(b, i)) { THDescBuff ba = THTensor_(sizeDesc)(a); THDescBuff bb = THTensor_(sizeDesc)(b); THError("inconsistent tensor sizes %s, %s", ba.str, bb.str); } } if(dimension < 0) { for(i = 0; i < THTensor_(nDimension)(a); i++) { if(THTensor_(size)(a, i) == 3) { dimension = i; break; } } if(dimension < 0) { THDescBuff ba = THTensor_(sizeDesc)(a); THError("no dimension of size 3 in a: %s", ba.str); } } THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(a), 3, "dimension %d out of range", dimension+1); THArgCheck(THTensor_(size)(a, dimension) == 3, 3, "dimension %d does not have size 3", dimension+1); THTensor_(resizeAs)(r_, a); TH_TENSOR_DIM_APPLY3(real, a, real, b, real, r_, dimension, r__data[0*r__stride] = a_data[1*a_stride]*b_data[2*b_stride] - 
a_data[2*a_stride]*b_data[1*b_stride]; r__data[1*r__stride] = a_data[2*a_stride]*b_data[0*b_stride] - a_data[0*a_stride]*b_data[2*b_stride]; r__data[2*r__stride] = a_data[0*a_stride]*b_data[1*b_stride] - a_data[1*a_stride]*b_data[0*b_stride];); } void THTensor_(cmax)(THTensor *r, THTensor *t, THTensor *src) { THTensor_(resizeAs)(r, t); TH_TENSOR_APPLY3(real, r, real, t, real, src, *r_data = *t_data > *src_data ? *t_data : *src_data;); } void THTensor_(cmin)(THTensor *r, THTensor *t, THTensor *src) { THTensor_(resizeAs)(r, t); TH_TENSOR_APPLY3(real, r, real, t, real, src, *r_data = *t_data < *src_data ? *t_data : *src_data;); } void THTensor_(cmaxValue)(THTensor *r, THTensor *t, real value) { THTensor_(resizeAs)(r, t); TH_TENSOR_APPLY2(real, r, real, t, *r_data = *t_data > value ? *t_data : value;); } void THTensor_(cminValue)(THTensor *r, THTensor *t, real value) { THTensor_(resizeAs)(r, t); TH_TENSOR_APPLY2(real, r, real, t, *r_data = *t_data < value ? *t_data : value;); } void THTensor_(zeros)(THTensor *r_, THLongStorage *size) { THTensor_(resize)(r_, size, NULL); THTensor_(zero)(r_); } void THTensor_(ones)(THTensor *r_, THLongStorage *size) { THTensor_(resize)(r_, size, NULL); THTensor_(fill)(r_, 1); } void THTensor_(diag)(THTensor *r_, THTensor *t, int k) { THArgCheck(THTensor_(nDimension)(t) == 1 || THTensor_(nDimension)(t) == 2, 1, "matrix or a vector expected"); if(THTensor_(nDimension)(t) == 1) { real *t_data = THTensor_(data)(t); long t_stride_0 = THTensor_(stride)(t, 0); long t_size = THTensor_(size)(t, 0); long sz = t_size + (k >= 0 ? k : -k); real *r__data; long r__stride_0; long r__stride_1; long i; THTensor_(resize2d)(r_, sz, sz); THTensor_(zero)(r_); r__data = THTensor_(data)(r_); r__stride_0 = THTensor_(stride)(r_, 0); r__stride_1 = THTensor_(stride)(r_, 1); r__data += (k >= 0 ? k*r__stride_1 : -k*r__stride_0); for(i = 0; i < t_size; i++) r__data[i*(r__stride_0+r__stride_1)] = t_data[i*t_stride_0]; } else { real *t_data = THTensor_(data)(t); long t_stride_0 = THTensor_(stride)(t, 0); long t_stride_1 = THTensor_(stride)(t, 1); long sz; real *r__data; long r__stride_0; long i; if(k >= 0) sz = THMin(THTensor_(size)(t, 0), THTensor_(size)(t, 1)-k); else sz = THMin(THTensor_(size)(t, 0)+k, THTensor_(size)(t, 1)); THTensor_(resize1d)(r_, sz); r__data = THTensor_(data)(r_); r__stride_0 = THTensor_(stride)(r_, 0); t_data += (k >= 0 ? 
k*t_stride_1 : -k*t_stride_0); for(i = 0; i < sz; i++) r__data[i*r__stride_0] = t_data[i*(t_stride_0+t_stride_1)]; } } void THTensor_(eye)(THTensor *r_, long n, long m) { real *r__data; long i, sz; THArgCheck(n > 0, 1, "invalid argument"); if(m <= 0) m = n; THTensor_(resize2d)(r_, n, m); THTensor_(zero)(r_); i = 0; r__data = THTensor_(data)(r_); sz = THMin(THTensor_(size)(r_, 0), THTensor_(size)(r_, 1)); for(i = 0; i < sz; i++) r__data[i*(r_->stride[0]+r_->stride[1])] = 1; } void THTensor_(range)(THTensor *r_, accreal xmin, accreal xmax, accreal step) { long size; real i = 0; THArgCheck(step > 0 || step < 0, 3, "step must be a non-null number"); THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin)) , 2, "upper bound and larger bound incoherent with step sign"); size = (long) (((xmax - xmin) / step) + 1); if (THTensor_(nElement)(r_) != size) { THTensor_(resize1d)(r_, size); } TH_TENSOR_APPLY(real, r_, *r__data = xmin + (i++)*step;); } void THTensor_(randperm)(THTensor *r_, THGenerator *_generator, long n) { real *r__data; long r__stride_0; long i; THArgCheck(n > 0, 1, "must be strictly positive"); THTensor_(resize1d)(r_, n); r__data = THTensor_(data)(r_); r__stride_0 = THTensor_(stride)(r_,0); for(i = 0; i < n; i++) r__data[i*r__stride_0] = (real)(i); for(i = 0; i < n-1; i++) { long z = THRandom_random(_generator) % (n-i); real sav = r__data[i*r__stride_0]; r__data[i*r__stride_0] = r__data[(z+i)*r__stride_0]; r__data[(z+i)*r__stride_0] = sav; } } void THTensor_(reshape)(THTensor *r_, THTensor *t, THLongStorage *size) { THTensor_(resize)(r_, size, NULL); THTensor_(copy)(r_, t); } /* I cut and pasted (slightly adapted) the quicksort code from Sedgewick's 1978 "Implementing Quicksort Programs" article http://www.csie.ntu.edu.tw/~b93076/p847-sedgewick.pdf It is the state of the art existing implementation. The macros are here to make as close a match as possible to the pseudocode of Program 2 p.851 Note that other partition schemes exist, and are typically presented in textbook, but those are less efficient. See e.g. 
http://cs.stackexchange.com/questions/11458/quicksort-partitioning-hoare-vs-lomuto Julien, November 12th 2013 */ #define MAX_LEVELS 300 #define M_SMALL 10 /* Limit for small subfiles */ #define ARR(III) arr[(III)*stride] #define IDX(III) idx[(III)*stride] #define LONG_SWAP(AAA, BBB) swap = AAA; AAA = BBB; BBB = swap #define REAL_SWAP(AAA, BBB) rswap = AAA; AAA = BBB; BBB = rswap #define BOTH_SWAP(III, JJJ) \ REAL_SWAP(ARR(III), ARR(JJJ)); \ LONG_SWAP(IDX(III), IDX(JJJ)) static void THTensor_(quicksortascend)(real *arr, long *idx, long elements, long stride) { long beg[MAX_LEVELS], end[MAX_LEVELS], i, j, L, R, P, swap, pid, stack = 0, sz_right, sz_left; real rswap, piv; unsigned char done = 0; /* beg[0]=0; end[0]=elements; */ stack = 0; L = 0; R = elements-1; done = elements-1 <= M_SMALL; while(!done) { /* Use median of three for pivot choice */ P=(L+R)>>1; BOTH_SWAP(P, L+1); if (ARR(L+1) > ARR(R)) { BOTH_SWAP(L+1, R); } if (ARR(L) > ARR(R)) { BOTH_SWAP(L, R); } if (ARR(L+1) > ARR(L)) { BOTH_SWAP(L+1, L); } i = L+1; j = R; piv = ARR(L); pid = IDX(L); do { do { i = i+1; } while(ARR(i) < piv); do { j = j-1; } while(ARR(j) > piv); if (j < i) break; BOTH_SWAP(i, j); } while(1); BOTH_SWAP(L, j); /* Left subfile is (L, j-1) */ /* Right subfile is (i, R) */ sz_left = j-L; sz_right = R-i+1; if (sz_left <= M_SMALL && sz_right <= M_SMALL) { /* both subfiles are small */ /* if stack empty */ if (stack == 0) { done = 1; } else { stack--; L = beg[stack]; R = end[stack]; } } else if (sz_left <= M_SMALL || sz_right <= M_SMALL) { /* exactly one of the subfiles is small */ /* (L,R) = large subfile */ if (sz_left > sz_right) { /* Implicit: L = L; */ R = j-1; } else { L = i; /* Implicit: R = R; */ } } else { /* none of the subfiles is small */ /* push large subfile */ /* (L,R) = small subfile */ if (sz_left > sz_right) { beg[stack] = L; end[stack] = j-1; stack++; L = i; /* Implicit: R = R */ } else { beg[stack] = i; end[stack] = R; stack++; /* Implicit: L = L; */ R = j-1; } } } /* while not done */ /* Now insertion sort on the concatenation of subfiles */ for(i=elements-2; i>=0; i--) { if (ARR(i) > ARR(i+1)) { piv = ARR(i); pid = IDX(i); j = i+1; do { ARR(j-1) = ARR(j); IDX(j-1) = IDX(j); j = j+1; } while(j < elements && ARR(j) < piv); ARR(j-1) = piv; IDX(j-1) = pid; } } } static void THTensor_(quicksortdescend)(real *arr, long *idx, long elements, long stride) { long beg[MAX_LEVELS], end[MAX_LEVELS], i, j, L, R, P, swap, pid, stack = 0, sz_right, sz_left; real rswap, piv; unsigned char done = 0; /* beg[0]=0; end[0]=elements; */ stack = 0; L = 0; R = elements-1; done = elements-1 <= M_SMALL; while(!done) { /* Use median of three for pivot choice */ P=(L+R)>>1; BOTH_SWAP(P, L+1); if (ARR(L+1) < ARR(R)) { BOTH_SWAP(L+1, R); } if (ARR(L) < ARR(R)) { BOTH_SWAP(L, R); } if (ARR(L+1) < ARR(L)) { BOTH_SWAP(L+1, L); } i = L+1; j = R; piv = ARR(L); pid = IDX(L); do { do { i = i+1; } while(ARR(i) > piv); do { j = j-1; } while(ARR(j) < piv); if (j < i) break; BOTH_SWAP(i, j); } while(1); BOTH_SWAP(L, j); /* Left subfile is (L, j-1) */ /* Right subfile is (i, R) */ sz_left = j-L; sz_right = R-i+1; if (sz_left <= M_SMALL && sz_right <= M_SMALL) { /* both subfiles are small */ /* if stack empty */ if (stack == 0) { done = 1; } else { stack--; L = beg[stack]; R = end[stack]; } } else if (sz_left <= M_SMALL || sz_right <= M_SMALL) { /* exactly one of the subfiles is small */ /* (L,R) = large subfile */ if (sz_left > sz_right) { /* Implicit: L = L; */ R = j-1; } else { L = i; /* Implicit: R = R; */ } } else { /* none of the 
subfiles is small */ /* push large subfile */ /* (L,R) = small subfile */ if (sz_left > sz_right) { beg[stack] = L; end[stack] = j-1; stack++; L = i; /* Implicit: R = R */ } else { beg[stack] = i; end[stack] = R; stack++; /* Implicit: L = L; */ R = j-1; } } } /* while not done */ /* Now insertion sort on the concatenation of subfiles */ for(i=elements-2; i>=0; i--) { if (ARR(i) < ARR(i+1)) { piv = ARR(i); pid = IDX(i); j = i+1; do { ARR(j-1) = ARR(j); IDX(j-1) = IDX(j); j = j+1; } while(j < elements && ARR(j) > piv); ARR(j-1) = piv; IDX(j-1) = pid; } } } #undef MAX_LEVELS #undef M_SMALL void THTensor_(sort)(THTensor *rt_, THLongTensor *ri_, THTensor *t, int dimension, int descendingOrder) { THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "invalid dimension %d", dimension+1); THTensor_(resizeAs)(rt_, t); THTensor_(copy)(rt_, t); { THLongStorage *size = THTensor_(newSizeOf)(t); THLongTensor_resize(ri_, size, NULL); THLongStorage_free(size); } if(descendingOrder) { TH_TENSOR_DIM_APPLY2(real, rt_, long, ri_, dimension, long i; for(i = 0; i < ri__size; i++) ri__data[i*ri__stride] = i; THTensor_(quicksortdescend)(rt__data, ri__data, rt__size, rt__stride);) } else { TH_TENSOR_DIM_APPLY2(real, rt_, long, ri_, dimension, long i; for(i = 0; i < ri__size; i++) ri__data[i*ri__stride] = i; THTensor_(quicksortascend)(rt__data, ri__data, rt__size, rt__stride);) } } /* Implementation of the Quickselect algorithm, based on Nicolas Devillard's public domain implementation at http://ndevilla.free.fr/median/median/ Adapted similarly to the above Quicksort algorithm. */ static void THTensor_(quickselect)(real *arr, long *idx, long k, long elements, long stride) { long P, L, R, i, j, swap, pid; real rswap, piv; L = 0; R = elements-1; do { if (R <= L) /* One element only */ return; if (R == L+1) { /* Two elements only */ if (ARR(L) > ARR(R)) { BOTH_SWAP(L, R); } return; } /* Use median of three for pivot choice */ P=(L+R)>>1; BOTH_SWAP(P, L+1); if (ARR(L+1) > ARR(R)) { BOTH_SWAP(L+1, R); } if (ARR(L) > ARR(R)) { BOTH_SWAP(L, R); } if (ARR(L+1) > ARR(L)) { BOTH_SWAP(L+1, L); } i = L+1; j = R; piv = ARR(L); pid = IDX(L); do { do i++; while(ARR(i) < piv); do j--; while(ARR(j) > piv); if (j < i) break; BOTH_SWAP(i, j); } while(1); BOTH_SWAP(L, j); /* Re-set active partition */ if (j <= k) L=i; if (j >= k) R=j-1; } while(1); } #undef ARR #undef IDX #undef LONG_SWAP #undef REAL_SWAP #undef BOTH_SWAP void THTensor_(mode)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension) { THLongStorage *dim; THTensor *temp_; THLongTensor *tempi_; real *temp__data; long *tempi__data; long t_size_dim; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "dimension out of range"); dim = THTensor_(newSizeOf)(t); THLongStorage_set(dim, dimension, 1); THTensor_(resize)(values_, dim, NULL); THLongTensor_resize(indices_, dim, NULL); THLongStorage_free(dim); t_size_dim = THTensor_(size)(t, dimension); temp_ = THTensor_(new)(); THTensor_(resize1d)(temp_, t_size_dim); temp__data = THTensor_(data)(temp_); tempi_ = THLongTensor_new(); THLongTensor_resize1d(tempi_, t_size_dim); tempi__data = THLongTensor_data(tempi_); TH_TENSOR_DIM_APPLY3(real, t, real, values_, long, indices_, dimension, long i; long mode = 0; long modei = 0; long temp_freq = 0; long max_freq = 0; for(i = 0; i < t_size_dim; i++) temp__data[i] = t_data[i*t_stride]; for(i = 0; i < t_size_dim; i++) tempi__data[i] = i; THTensor_(quicksortascend)(temp__data, tempi__data, t_size_dim, 1); for(i = 0; i < t_size_dim; i++) { 
temp_freq++; if ((i == t_size_dim - 1) || (temp__data[i] != temp__data[i+1])) { if (temp_freq > max_freq) { mode = temp__data[i]; modei = tempi__data[i]; max_freq = temp_freq; } temp_freq = 0; } } *values__data = mode; *indices__data = modei;); THTensor_(free)(temp_); THLongTensor_free(tempi_); } void THTensor_(kthvalue)(THTensor *values_, THLongTensor *indices_, THTensor *t, long k, int dimension) { THLongStorage *dim; THTensor *temp_; THLongTensor *tempi_; real *temp__data; long *tempi__data; long t_size_dim; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "dimension out of range"); THArgCheck(k >= 0 && k < t->size[dimension], 2, "selected index out of range"); dim = THTensor_(newSizeOf)(t); THLongStorage_set(dim, dimension, 1); THTensor_(resize)(values_, dim, NULL); THLongTensor_resize(indices_, dim, NULL); THLongStorage_free(dim); t_size_dim = THTensor_(size)(t, dimension); temp_ = THTensor_(new)(); THTensor_(resize1d)(temp_, t_size_dim); temp__data = THTensor_(data)(temp_); tempi_ = THLongTensor_new(); THLongTensor_resize1d(tempi_, t_size_dim); tempi__data = THLongTensor_data(tempi_); TH_TENSOR_DIM_APPLY3(real, t, real, values_, long, indices_, dimension, long i; for(i = 0; i < t_size_dim; i++) temp__data[i] = t_data[i*t_stride]; for(i = 0; i < t_size_dim; i++) tempi__data[i] = i; THTensor_(quickselect)(temp__data, tempi__data, k, t_size_dim, 1); *values__data = temp__data[k]; *indices__data = tempi__data[k];); THTensor_(free)(temp_); THLongTensor_free(tempi_); } void THTensor_(median)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension) { long t_size_dim, k; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "dimension out of range"); t_size_dim = THTensor_(size)(t, dimension); k = (t_size_dim-1) >> 1; /* take middle or one-before-middle element */ THTensor_(kthvalue)(values_, indices_, t, k, dimension); } void THTensor_(topk)(THTensor *rt_, THLongTensor *ri_, THTensor *t, long k, int dim, int dir, int sorted) { int numDims = THTensor_(nDimension)(t); THArgCheck(dim >= 0 && dim < numDims, 3, "dim not in range"); long sliceSize = THTensor_(size)(t, dim); THArgCheck(k > 0 && k <= sliceSize, 2, "k not in range for dimension"); THTensor *tmpResults = THTensor_(new)(); THTensor_(resize1d)(tmpResults, sliceSize); real *tmp__data = THTensor_(data)(tmpResults); THLongTensor *tmpIndices = THLongTensor_new(); THLongTensor_resize1d(tmpIndices, sliceSize); long *tmpi__data = THLongTensor_data(tmpIndices); THLongStorage *topKSize = THTensor_(newSizeOf)(t); THLongStorage_set(topKSize, dim, k); THTensor_(resize)(rt_, topKSize, NULL); THLongTensor_resize(ri_, topKSize, NULL); THLongStorage_free(topKSize); if (dir) { /* k largest elements, descending order (optional: see sorted) */ long K = sliceSize - k; TH_TENSOR_DIM_APPLY3(real, t, real, rt_, long, ri_, dim, long i; for(i = 0; i < sliceSize; i++) { tmp__data[i] = t_data[i*t_stride]; tmpi__data[i] = i; } if (K > 0) THTensor_(quickselect)(tmp__data, tmpi__data, K - 1, sliceSize, 1); if (sorted) THTensor_(quicksortdescend)(tmp__data + K, tmpi__data + K, k, 1); for(i = 0; i < k; i++) { rt__data[i*rt__stride] = tmp__data[i + K]; ri__data[i*ri__stride] = tmpi__data[i + K]; }) } else { /* k smallest elements, ascending order (optional: see sorted) */ TH_TENSOR_DIM_APPLY3(real, t, real, rt_, long, ri_, dim, long i; for(i = 0; i < sliceSize; i++) { tmp__data[i] = t_data[i*t_stride]; tmpi__data[i] = i; } THTensor_(quickselect)(tmp__data, tmpi__data, k - 1, sliceSize, 1); if (sorted) 
THTensor_(quicksortascend)(tmp__data, tmpi__data, k - 1, 1); for(i = 0; i < k; i++) { rt__data[i*rt__stride] = tmp__data[i]; ri__data[i*ri__stride] = tmpi__data[i]; }) } THTensor_(free)(tmpResults); THLongTensor_free(tmpIndices); } void THTensor_(tril)(THTensor *r_, THTensor *t, long k) { long t_size_0, t_size_1; long t_stride_0, t_stride_1; long r__stride_0, r__stride_1; real *t_data, *r__data; long r, c; THArgCheck(THTensor_(nDimension)(t) == 2, 1, "expected a matrix"); THTensor_(resizeAs)(r_, t); t_size_0 = THTensor_(size)(t, 0); t_size_1 = THTensor_(size)(t, 1); t_stride_0 = THTensor_(stride)(t, 0); t_stride_1 = THTensor_(stride)(t, 1); r__stride_0 = THTensor_(stride)(r_, 0); r__stride_1 = THTensor_(stride)(r_, 1); r__data = THTensor_(data)(r_); t_data = THTensor_(data)(t); for(r = 0; r < t_size_0; r++) { long sz = THMin(r+k+1, t_size_1); for(c = THMax(0, r+k); c < t_size_1; c++) r__data[r*r__stride_0+c*r__stride_1] = 0; for(c = 0; c < sz; c++) r__data[r*r__stride_0+c*r__stride_1] = t_data[r*t_stride_0+c*t_stride_1]; } } void THTensor_(triu)(THTensor *r_, THTensor *t, long k) { long t_size_0, t_size_1; long t_stride_0, t_stride_1; long r__stride_0, r__stride_1; real *t_data, *r__data; long r, c; THArgCheck(THTensor_(nDimension)(t) == 2, 1, "expected a matrix"); THTensor_(resizeAs)(r_, t); t_size_0 = THTensor_(size)(t, 0); t_size_1 = THTensor_(size)(t, 1); t_stride_0 = THTensor_(stride)(t, 0); t_stride_1 = THTensor_(stride)(t, 1); r__stride_0 = THTensor_(stride)(r_, 0); r__stride_1 = THTensor_(stride)(r_, 1); r__data = THTensor_(data)(r_); t_data = THTensor_(data)(t); for(r = 0; r < t_size_0; r++) { long sz = THMin(r+k, t_size_1); for(c = THMax(0, r+k); c < t_size_1; c++) r__data[r*r__stride_0+c*r__stride_1] = t_data[r*t_stride_0+c*t_stride_1]; for(c = 0; c < sz; c++) r__data[r*r__stride_0+c*r__stride_1] = 0; } } void THTensor_(cat)(THTensor *r_, THTensor *ta, THTensor *tb, int dimension) { THTensor* inputs[2]; inputs[0] = ta; inputs[1] = tb; THTensor_(catArray)(r_, inputs, 2, dimension); } void THTensor_(catArray)(THTensor *result, THTensor **inputs, int numInputs, int dimension) { THLongStorage *size; int i, j; long offset; int ndim = dimension + 1; for (i = 0; i < numInputs; i++) { ndim = THMax(ndim, inputs[i]->nDimension); } THArgCheck(numInputs > 0, 3, "invalid number of inputs %d", numInputs); THArgCheck(dimension >= 0, 4, "invalid dimension %d", dimension+1); size = THLongStorage_newWithSize(ndim); for(i = 0; i < ndim; i++) { long dimSize = i < inputs[0]->nDimension ? inputs[0]->size[i] : 1; if (i == dimension) { for (j = 1; j < numInputs; j++) { dimSize += i < inputs[j]->nDimension ? inputs[j]->size[i] : 1; } } else { for (j = 1; j < numInputs; j++) { if (dimSize != (i < inputs[j]->nDimension ? inputs[j]->size[i] : 1)) { THLongStorage_free(size); THError("inconsistent tensor sizes"); } } } size->data[i] = dimSize; } THTensor_(resize)(result, size, NULL); THLongStorage_free(size); offset = 0; for (j = 0; j < numInputs; j++) { long dimSize = dimension < inputs[j]->nDimension ? 
inputs[j]->size[dimension] : 1; THTensor *nt = THTensor_(newWithTensor)(result); THTensor_(narrow)(nt, NULL, dimension, offset, dimSize); THTensor_(copy)(nt, inputs[j]); THTensor_(free)(nt); offset += dimSize; } } int THTensor_(equal)(THTensor *ta, THTensor* tb) { int equal = 1; if(!THTensor_(isSameSizeAs)(ta, tb)) return 0; if (THTensor_(isContiguous)(ta) && THTensor_(isContiguous)(tb)) { real *tap = THTensor_(data)(ta); real *tbp = THTensor_(data)(tb); long sz = THTensor_(nElement)(ta); long i; for (i=0; i<sz; ++i){ if(tap[i] != tbp[i]) return 0; } } else { // Short-circuit the apply function on inequality TH_TENSOR_APPLY2(real, ta, real, tb, if (equal && *ta_data != *tb_data) { equal = 0; TH_TENSOR_APPLY_hasFinished = 1; break; }) } return equal; } #define TENSOR_IMPLEMENT_LOGICAL(NAME,OP) \ void THTensor_(NAME##Value)(THByteTensor *r_, THTensor* t, real value) \ { \ THByteTensor_rawResize(r_, t->nDimension, t->size, NULL); \ THByteTensor_zero(r_); \ TH_TENSOR_APPLY2(unsigned char, r_, real, t, \ if (*t_data OP value) *r__data = 1;); \ } \ void THTensor_(NAME##ValueT)(THTensor* r_, THTensor* t, real value) \ { \ THTensor_(rawResize)(r_, t->nDimension, t->size, NULL); \ THTensor_(zero)(r_); \ TH_TENSOR_APPLY2(real, r_, real, t, \ if (*t_data OP value) *r__data = 1;); \ } \ void THTensor_(NAME##Tensor)(THByteTensor *r_, THTensor *ta, THTensor *tb) \ { \ THByteTensor_rawResize(r_, ta->nDimension, ta->size, NULL); \ THByteTensor_zero(r_); \ TH_TENSOR_APPLY3(unsigned char, r_, real, ta, real, tb, \ if(*ta_data OP *tb_data) *r__data = 1;); \ } \ void THTensor_(NAME##TensorT)(THTensor *r_, THTensor *ta, THTensor *tb) \ { \ THTensor_(rawResize)(r_, ta->nDimension, ta->size, NULL); \ THTensor_(zero)(r_); \ TH_TENSOR_APPLY3(real, r_, real, ta, real, tb, \ if(*ta_data OP *tb_data) *r__data = 1;); \ } \ TENSOR_IMPLEMENT_LOGICAL(lt,<) TENSOR_IMPLEMENT_LOGICAL(gt,>) TENSOR_IMPLEMENT_LOGICAL(le,<=) TENSOR_IMPLEMENT_LOGICAL(ge,>=) TENSOR_IMPLEMENT_LOGICAL(eq,==) TENSOR_IMPLEMENT_LOGICAL(ne,!=) #define LAB_IMPLEMENT_BASIC_FUNCTION(NAME, CFUNC) \ void THTensor_(NAME)(THTensor *r_, THTensor *t) \ { \ THTensor_(resizeAs)(r_, t); \ TH_TENSOR_APPLY2(real, t, real, r_, *r__data = CFUNC(*t_data);); \ } \ #define LAB_IMPLEMENT_BASIC_FUNCTION_VALUE(NAME, CFUNC) \ void THTensor_(NAME)(THTensor *r_, THTensor *t, real value) \ { \ THTensor_(resizeAs)(r_, t); \ TH_TENSOR_APPLY2(real, t, real, r_, *r__data = CFUNC(*t_data, value);); \ } \ #if defined(TH_REAL_IS_LONG) LAB_IMPLEMENT_BASIC_FUNCTION(abs,labs) #endif /* long only part */ #if defined(TH_REAL_IS_INT) LAB_IMPLEMENT_BASIC_FUNCTION(abs,abs) #endif /* int only part */ #if defined(TH_REAL_IS_BYTE) #define TENSOR_IMPLEMENT_LOGICAL_SUM(NAME, OP, INIT_VALUE) \ int THTensor_(NAME)(THTensor *tensor) \ { \ THArgCheck(tensor->nDimension > 0, 1, "empty Tensor"); \ int sum = INIT_VALUE; \ TH_TENSOR_APPLY(real, tensor, sum = sum OP *tensor_data;); \ return sum; \ } TENSOR_IMPLEMENT_LOGICAL_SUM(logicalall, &&, 1) TENSOR_IMPLEMENT_LOGICAL_SUM(logicalany, ||, 0) #endif /* Byte only part */ /* floating point only now */ #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) LAB_IMPLEMENT_BASIC_FUNCTION(log,log) LAB_IMPLEMENT_BASIC_FUNCTION(log1p,log1p) LAB_IMPLEMENT_BASIC_FUNCTION(sigmoid,TH_sigmoid) LAB_IMPLEMENT_BASIC_FUNCTION(exp,exp) LAB_IMPLEMENT_BASIC_FUNCTION(cos,cos) LAB_IMPLEMENT_BASIC_FUNCTION(acos,acos) LAB_IMPLEMENT_BASIC_FUNCTION(cosh,cosh) LAB_IMPLEMENT_BASIC_FUNCTION(sin,sin) LAB_IMPLEMENT_BASIC_FUNCTION(asin,asin) LAB_IMPLEMENT_BASIC_FUNCTION(sinh,sinh) 
LAB_IMPLEMENT_BASIC_FUNCTION(tan,tan)
LAB_IMPLEMENT_BASIC_FUNCTION(atan,atan)
LAB_IMPLEMENT_BASIC_FUNCTION(tanh,tanh)
LAB_IMPLEMENT_BASIC_FUNCTION_VALUE(pow,pow)
LAB_IMPLEMENT_BASIC_FUNCTION(sqrt,sqrt)
LAB_IMPLEMENT_BASIC_FUNCTION(rsqrt,TH_rsqrt)
LAB_IMPLEMENT_BASIC_FUNCTION(ceil,ceil)
LAB_IMPLEMENT_BASIC_FUNCTION(floor,floor)
LAB_IMPLEMENT_BASIC_FUNCTION(round,round)
LAB_IMPLEMENT_BASIC_FUNCTION(abs,fabs)
LAB_IMPLEMENT_BASIC_FUNCTION(trunc,trunc)
LAB_IMPLEMENT_BASIC_FUNCTION(frac,TH_frac)
LAB_IMPLEMENT_BASIC_FUNCTION(neg,-)
LAB_IMPLEMENT_BASIC_FUNCTION(cinv, 1.0 / )

void THTensor_(atan2)(THTensor *r_, THTensor *tx, THTensor *ty)
{
  THTensor_(resizeAs)(r_, tx);
  TH_TENSOR_APPLY3(real, r_, real, tx, real, ty, *r__data = atan2(*tx_data, *ty_data););
}

void THTensor_(lerp)(THTensor *r_, THTensor *a, THTensor *b, real weight)
{
  THArgCheck(THTensor_(nElement)(a) == THTensor_(nElement)(b), 2, "sizes do not match");
  THTensor_(resizeAs)(r_, a);
  TH_TENSOR_APPLY3(real, r_, real, a, real, b, *r__data = TH_lerp(*a_data, *b_data, weight););
}

void THTensor_(mean)(THTensor *r_, THTensor *t, int dimension)
{
  THLongStorage *dim;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "invalid dimension %d", dimension+1);

  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(r_, dim, NULL);
  THLongStorage_free(dim);

  TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                       accreal sum = 0;
                       long i;
                       for(i = 0; i < t_size; i++)
                         sum += t_data[i*t_stride];
                       *r__data = (real)sum/t_size;);
}

void THTensor_(std)(THTensor *r_, THTensor *t, int dimension, int flag)
{
  THLongStorage *dim;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension %d", dimension+1);

  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(r_, dim, NULL);
  THLongStorage_free(dim);

  TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                       accreal sum = 0;
                       accreal sum2 = 0;
                       long i;
                       for(i = 0; i < t_size; i++)
                       {
                         real z = t_data[i*t_stride];
                         sum += z;
                         sum2 += z*z;
                       }
                       if(flag)
                       {
                         sum /= t_size;
                         sum2 /= t_size;
                         sum2 -= sum*sum;
                         sum2 = (sum2 < 0 ? 0 : sum2);
                         *r__data = (real)sqrt(sum2);
                       }
                       else
                       {
                         sum /= t_size;
                         sum2 /= t_size-1;
                         sum2 -= ((real)t_size)/((real)(t_size-1))*sum*sum;
                         sum2 = (sum2 < 0 ? 0 : sum2);
                         *r__data = (real)sqrt(sum2);
                       });
}

void THTensor_(var)(THTensor *r_, THTensor *t, int dimension, int flag)
{
  THLongStorage *dim;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension %d", dimension+1);

  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(r_, dim, NULL);
  THLongStorage_free(dim);

  TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                       accreal sum = 0;
                       accreal sum2 = 0;
                       long i;
                       for(i = 0; i < t_size; i++)
                       {
                         real z = t_data[i*t_stride];
                         sum += z;
                         sum2 += z*z;
                       }
                       if(flag)
                       {
                         sum /= t_size;
                         sum2 /= t_size;
                         sum2 -= sum*sum;
                         sum2 = (sum2 < 0 ? 0 : sum2);
                         *r__data = sum2;
                       }
                       else
                       {
                         sum /= t_size;
                         sum2 /= t_size-1;
                         sum2 -= ((real)t_size)/((real)(t_size-1))*sum*sum;
                         sum2 = (sum2 < 0 ? 0 : sum2);
                         *r__data = (real)sum2;
                       });
}

void THTensor_(norm)(THTensor *r_, THTensor *t, real value, int dimension)
{
  THLongStorage *dim;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension %d", dimension+1);

  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(r_, dim, NULL);
  THLongStorage_free(dim);

  if(value == 0) {
    TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                         accreal sum = 0;
                         long i;
                         for(i = 0; i < t_size; i++)
                           sum += t_data[i*t_stride] != 0.0;
                         *r__data = sum;)
  } else {
    TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                         accreal sum = 0;
                         long i;
                         for(i = 0; i < t_size; i++)
                           sum += pow(fabs(t_data[i*t_stride]), value);
                         *r__data = pow(sum, 1.0/value);)
  }
}

accreal THTensor_(normall)(THTensor *tensor, real value)
{
  accreal sum = 0;
  if(value == 0) {
    TH_TENSOR_APPLY(real, tensor, sum += *tensor_data != 0.0;);
    return sum;
  } else if(value == 1) {
    TH_TENSOR_APPLY(real, tensor, sum += fabs(*tensor_data););
    return sum;
  } else if(value == 2) {
    TH_TENSOR_APPLY(real, tensor, accreal z = *tensor_data; sum += z*z;);
    return sqrt(sum);
  } else {
    TH_TENSOR_APPLY(real, tensor, sum += pow(fabs(*tensor_data), value););
    return pow(sum, 1.0/value);
  }
}

void THTensor_(renorm)(THTensor *res, THTensor *src, real value, int dimension, real maxnorm)
{
  int i;
  THTensor *rowR, *rowS;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(src), 3, "invalid dimension %d", dimension+1);
  THArgCheck(value > 0, 2, "non-positive-norm not supported");
  THArgCheck(THTensor_(nDimension)(src) > 1, 1, "need at least 2 dimensions, got %d dimensions", THTensor_(nDimension)(src));

  rowR = THTensor_(new)();
  rowS = THTensor_(new)();

  THTensor_(resizeAs)(res, src);

  for (i=0; i<src->size[dimension]; i++)
  {
    real norm = 0;
    real new_norm;

    THTensor_(select)(rowS, src, dimension, i);
    THTensor_(select)(rowR, res, dimension, i);
    if (value == 1) {
      TH_TENSOR_APPLY(real, rowS, norm += fabs(*rowS_data););
    } else if (value == 2) {
      TH_TENSOR_APPLY(real, rowS, accreal z = *rowS_data; norm += z*z;);
    } else {
      TH_TENSOR_APPLY(real, rowS, norm += pow(fabs(*rowS_data), value););
    }

    norm = pow(norm, 1/value);

    if (norm > maxnorm)
    {
      new_norm = maxnorm / (norm + 1e-7);

      TH_TENSOR_APPLY2(
        real, rowR, real, rowS,
        *rowR_data = (*rowS_data) * new_norm;
      )
    }
    else
      THTensor_(copy)(rowR, rowS);
  }

  THTensor_(free)(rowR);
  THTensor_(free)(rowS);
}

accreal THTensor_(dist)(THTensor *tensor, THTensor *src, real value)
{
  real sum = 0;
  TH_TENSOR_APPLY2(real, tensor, real, src,
                   sum += pow(fabs(*tensor_data - *src_data), value);)
  return pow(sum, 1.0/value);
}

accreal THTensor_(meanall)(THTensor *tensor)
{
  THArgCheck(tensor->nDimension > 0, 1, "empty Tensor");
  return THTensor_(sumall)(tensor)/THTensor_(nElement)(tensor);
}

accreal THTensor_(varall)(THTensor *tensor)
{
  accreal mean = THTensor_(meanall)(tensor);
  accreal sum = 0;
  TH_TENSOR_APPLY(real, tensor, sum += (*tensor_data - mean)*(*tensor_data - mean););
  sum /= (THTensor_(nElement)(tensor)-1);
  return sum;
}

accreal THTensor_(stdall)(THTensor *tensor)
{
  return sqrt(THTensor_(varall)(tensor));
}

void THTensor_(linspace)(THTensor *r_, real a, real b, long n)
{
  real i = 0;

  THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points");

  if (THTensor_(nElement)(r_) != n) {
    THTensor_(resize1d)(r_, n);
  }

  if(n == 1) {
    TH_TENSOR_APPLY(real, r_,
                    *r__data = a;
                    i++;
                   );
  } else {
    TH_TENSOR_APPLY(real, r_,
                    *r__data = a + i*(b-a)/((real)(n-1));
                    i++;
                   );
  }
}

void THTensor_(logspace)(THTensor *r_, real a, real b, long n)
{
  real i = 0;

  THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points");

  if (THTensor_(nElement)(r_) != n) {
    THTensor_(resize1d)(r_, n);
  }

  if(n == 1) {
    TH_TENSOR_APPLY(real, r_,
                    *r__data = pow(10.0, a);
                    i++;
                   );
  } else {
    TH_TENSOR_APPLY(real, r_,
                    *r__data = pow(10.0, a + i*(b-a)/((real)(n-1)));
                    i++;
                   );
  }
}

void THTensor_(rand)(THTensor *r_, THGenerator *_generator, THLongStorage *size)
{
  THTensor_(resize)(r_, size, NULL);
  THTensor_(uniform)(r_, _generator, 0, 1);
}

void THTensor_(randn)(THTensor *r_, THGenerator *_generator, THLongStorage *size)
{
  THTensor_(resize)(r_, size, NULL);
  THTensor_(normal)(r_, _generator, 0, 1);
}

void THTensor_(histc)(THTensor *hist, THTensor *tensor, long nbins, real minvalue, real maxvalue)
{
  THTensor *clone;
  real minval;
  real maxval;
  real bins;
  real *h_data;

  THTensor_(resize1d)(hist, nbins);
  THTensor_(zero)(hist);
  minval = minvalue;
  maxval = maxvalue;
  if (minval == maxval)
  {
    minval = THTensor_(minall)(tensor);
    maxval = THTensor_(maxall)(tensor);
  }
  if (minval == maxval)
  {
    minval = minval - 1;
    maxval = maxval + 1;
  }
  bins = (real)(nbins)-1e-6;

  clone = THTensor_(newWithSize1d)(THTensor_(nElement)(tensor));
  THTensor_(copy)(clone,tensor);
  THTensor_(add)(clone, clone, -minval);
  THTensor_(div)(clone, clone, (maxval-minval));
  THTensor_(mul)(clone, clone, bins);
  THTensor_(floor)(clone, clone);
  THTensor_(add)(clone, clone, 1);

  h_data = THTensor_(data)(hist);

  TH_TENSOR_APPLY(real, clone,                                         \
                  if ((*clone_data <= nbins) && (*clone_data >= 1)) {  \
                    *(h_data + (int)(*clone_data) - 1) += 1;           \
                  });

  THTensor_(free)(clone);
}

#endif /* floating point only part */
#endif
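For reference, the flag argument of THTensor_(std) and THTensor_(var) above selects between the biased and the unbiased estimator; with n = t_size and z̄ the slice mean, the two branches compute exactly

\[
\texttt{flag}=1:\quad \sigma^2 = \frac{1}{n}\sum_i z_i^2 - \bar z^2, \qquad
\texttt{flag}=0:\quad \sigma^2 = \frac{1}{n-1}\sum_i z_i^2 - \frac{n}{n-1}\,\bar z^2 = \frac{1}{n-1}\sum_i (z_i-\bar z)^2 .
\]

The clamp to zero before the sqrt guards against small negative values produced by floating-point cancellation.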
TBB_Parallel_SpeedUp.h
#ifndef TBB_PARALLEL_SPEEDUP_H
#define TBB_PARALLEL_SPEEDUP_H
#pragma once

#include <opencv2/opencv.hpp> // forward slashes: the original "\" include path is Windows-only
#include <cfloat>             // FLT_MAX, used by the Otsu threshold search below
#include <thread>
#include <mutex>
#include <atomic>
#include <map>

using namespace cv;

class Parallel_process_gau : public cv::ParallelLoopBody
{
private:
    const Mat &img;
    Mat& retVal;
    const int size;
    int diff;

public:
    Parallel_process_gau(const cv::Mat &inputImgage, cv::Mat& outImage, const int &sizeVal, int diffVal)
        : img(inputImgage), retVal(outImage), size(sizeVal), diff(diffVal) {}

    virtual void operator()(const cv::Range& range) const
    {
        //#pragma omp parallel for
        for(int i = range.start; i < range.end; ++i)
        {
            /* divide image in 'diff' number of parts and process simultaneously */
            cv::Mat in(img, cv::Rect(0, (img.rows/diff)*i, img.cols, img.rows/diff));
            cv::Mat out(retVal, cv::Rect(0, (retVal.rows/diff)*i, retVal.cols, retVal.rows/diff));
            GaussianBlur(in, out, Size(size, size), 0);
        }
    }
};

class Parallel_process_er : public cv::ParallelLoopBody
{
private:
    const Mat &img;
    const Mat &element;
    Mat& retVal;
    int struc_elem;
    int diff;

public:
    Parallel_process_er(const cv::Mat &inputImgage, cv::Mat& outImage, const cv::Mat &struc_ele, int diffVal)
        : img(inputImgage), element(struc_ele), retVal(outImage), diff(diffVal) {}

    virtual void operator()(const cv::Range& range) const
    {
        //#pragma omp parallel for
        //Mat element = getStructuringElement( struc_elem, Size( 2*size + 1, 2*size+1 ), Point( size, size ) );
        for(int i = range.start; i < range.end; ++i)
        {
            /* divide image in 'diff' number of parts and process simultaneously */
            cv::Mat in(img, cv::Rect(0, (img.rows/diff)*i, img.cols, img.rows/diff));
            cv::Mat out(retVal, cv::Rect(0, (retVal.rows/diff)*i, retVal.cols, retVal.rows/diff));
            //Morphology_Operations(in, tmp , MORPH_OPEN, openint_size, MORPH_RECT);
            /// Apply the specified morphology operation
            //morphologyEx( in, out, MORPH_ERODE, element ,Point(-1,-1), 1,BORDER_DEFAULT);
            erode(in, out, element, Point(-1, -1), 1, BORDER_DEFAULT);
        }
    }
};

class Parallel_process_di : public cv::ParallelLoopBody
{
private:
    const Mat &img;
    const Mat &element;
    Mat& retVal;
    int struc_elem;
    int diff;

public:
    Parallel_process_di(const cv::Mat &inputImgage, cv::Mat& outImage, const cv::Mat &struc_ele, int diffVal)
        : img(inputImgage), element(struc_ele), retVal(outImage), diff(diffVal) {}

    virtual void operator()(const cv::Range& range) const
    {
        //#pragma omp parallel for
        //Mat element = getStructuringElement( struc_elem, Size( 2*size + 1, 2*size+1 ), Point( size, size ) );
        for(int i = range.start; i < range.end; ++i)
        {
            /* divide image in 'diff' number of parts and process simultaneously */
            cv::Mat in(img, cv::Rect(0, (img.rows/diff)*i, img.cols, img.rows/diff));
            cv::Mat out(retVal, cv::Rect(0, (retVal.rows/diff)*i, retVal.cols, retVal.rows/diff));
            //Morphology_Operations(in, tmp , MORPH_OPEN, openint_size, MORPH_RECT);
            /// Apply the specified morphology operation
            //morphologyEx( in, out, MORPH_DILATE, element ,Point(-1,-1), 1,BORDER_DEFAULT);
            dilate(in, out, element, Point(-1, -1), 1, BORDER_DEFAULT);
        }
    }
};

class Parallel_process_threBinary : public cv::ParallelLoopBody
{
private:
    const Mat &img;
    Mat& retVal;
    const int thresholdValue;
    int diff;

public:
    Parallel_process_threBinary(const cv::Mat &inputImgage, cv::Mat& outImage, const int &thresh_value, int diffVal)
        : img(inputImgage), retVal(outImage), thresholdValue(thresh_value), diff(diffVal) {}

    virtual void operator()(const cv::Range& range) const
    {
        //#pragma omp parallel for
        for(int i = range.start; i < range.end; ++i)
        {
            /* divide image in 'diff' number of parts and process simultaneously */
            cv::Mat in(img, cv::Rect(0, (img.rows/diff)*i, img.cols, img.rows/diff));
            cv::Mat out(retVal, cv::Rect(0, (retVal.rows/diff)*i, retVal.cols, retVal.rows/diff));
            threshold(in, out, thresholdValue, 255, THRESH_BINARY);
        }
    }
};

class Parallel_process_scharrX : public cv::ParallelLoopBody
{
private:
    const Mat &img;
    Mat& retVal;
    int diff;

public:
    Parallel_process_scharrX(const cv::Mat &inputImgage, cv::Mat& outImage, int diffVal)
        : img(inputImgage), retVal(outImage), diff(diffVal) {}

    virtual void operator()(const cv::Range& range) const
    {
        //#pragma omp parallel for
        for(int i = range.start; i < range.end; ++i)
        {
            /* divide image in 'diff' number of parts and process simultaneously */
            cv::Mat in(img, cv::Rect(0, (img.rows/diff)*i, img.cols, img.rows/diff));
            cv::Mat out(retVal, cv::Rect(0, (retVal.rows/diff)*i, retVal.cols, retVal.rows/diff));
            Scharr( in, out, CV_16S, 1, 0, 3, 0, BORDER_DEFAULT );
        }
    }
};

class Parallel_process_scharrY : public cv::ParallelLoopBody
{
private:
    const Mat &img;
    Mat& retVal;
    int diff;

public:
    Parallel_process_scharrY(const cv::Mat &inputImgage, cv::Mat& outImage, int diffVal)
        : img(inputImgage), retVal(outImage), diff(diffVal) {}

    virtual void operator()(const cv::Range& range) const
    {
        //#pragma omp parallel for
        for(int i = range.start; i < range.end; ++i)
        {
            /* divide image in 'diff' number of parts and process simultaneously */
            cv::Mat in(img, cv::Rect(0, (img.rows/diff)*i, img.cols, img.rows/diff));
            cv::Mat out(retVal, cv::Rect(0, (retVal.rows/diff)*i, retVal.cols, retVal.rows/diff));
            Scharr( in, out, CV_16S, 0, 1, 3, 0, BORDER_DEFAULT );
        }
    }
};

class Parallel_process_convertScaleAbs : public cv::ParallelLoopBody
{
private:
    const Mat &img;
    Mat& retVal;
    Mat element;
    int diff;

public:
    Parallel_process_convertScaleAbs(const cv::Mat &inputImgage, cv::Mat& outImage, int diffVal)
        : img(inputImgage), retVal(outImage), diff(diffVal) {}

    virtual void operator()(const cv::Range& range) const
    {
        //#pragma omp parallel for
        for(int i = range.start; i < range.end; ++i)
        {
            /* divide image in 'diff' number of parts and process simultaneously */
            cv::Mat in(img, cv::Rect(0, (img.rows/diff)*i, img.cols, img.rows/diff));
            cv::Mat out(retVal, cv::Rect(0, (retVal.rows/diff)*i, retVal.cols, retVal.rows/diff));
            convertScaleAbs( in, out );
        }
    }
};

class Parallel_process_convertScaleAbs_findMinMax : public cv::ParallelLoopBody
{
private:
    const Mat &img;
    Mat& retVal;
    int diff;
    double *min_val_cand;
    double *max_val_cand;

public:
    Parallel_process_convertScaleAbs_findMinMax(const cv::Mat &inputImgage, cv::Mat& outImage,
                                                double *min_val_cand, double *max_val_cand, int diffVal)
        : img(inputImgage), retVal(outImage), diff(diffVal),
          min_val_cand(min_val_cand), max_val_cand(max_val_cand) {}

    virtual void operator()(const cv::Range& range) const
    {
        //#pragma omp parallel for
        for (int i = range.start; i < range.end; ++i)
        {
            /* divide image in 'diff' number of parts and process simultaneously */
            cv::Mat in(img, cv::Rect(0, (img.rows / diff)*i, img.cols, img.rows / diff));
            cv::Mat out(retVal, cv::Rect(0, (retVal.rows / diff)*i, retVal.cols, retVal.rows / diff));

            double min_val;
            double max_val;
            convertScaleAbs(in, out);
            minMaxLoc(out, &min_val, &max_val, NULL, NULL);
            min_val_cand[i] = min_val;
            max_val_cand[i] = max_val;
        }
    }
};

class Parallel_process_normalize : public cv::ParallelLoopBody
{
private:
    const Mat &img;
    Mat& retVal;
    Mat element;
    int diff;

public:
    Parallel_process_normalize(const cv::Mat &inputImgage, cv::Mat& outImage, int diffVal)
        : img(inputImgage), retVal(outImage), diff(diffVal) {}
    virtual void operator()(const cv::Range& range) const
    {
        //#pragma omp parallel for
        for(int i = range.start; i < range.end; ++i)
        {
            /* divide image in 'diff' number of parts and process simultaneously */
            cv::Mat in(img, cv::Rect(0, (img.rows/diff)*i, img.cols, img.rows/diff));
            cv::Mat out(retVal, cv::Rect(0, (retVal.rows/diff)*i, retVal.cols, retVal.rows/diff));
            normalize(in, out, 0, 255, NORM_MINMAX, CV_8UC1);//L
        }
    }
};

class Parallel_process_bitwiseand : public cv::ParallelLoopBody
{
private:
    const Mat &imgA;
    const Mat &imgB;
    Mat& retVal;
    Mat element;
    int diff;

public:
    Parallel_process_bitwiseand(const cv::Mat &inputImgageA, const cv::Mat &inputImgageB, cv::Mat& outImage, int diffVal)
        : imgA(inputImgageA), imgB(inputImgageB), retVal(outImage), diff(diffVal) {}

    virtual void operator()(const cv::Range& range) const
    {
        //#pragma omp parallel for
        for(int i = range.start; i < range.end; ++i)
        {
            /* divide image in 'diff' number of parts and process simultaneously */
            cv::Mat inA(imgA, cv::Rect(0, (imgA.rows/diff)*i, imgA.cols, imgA.rows/diff));
            cv::Mat inB(imgB, cv::Rect(0, (imgB.rows/diff)*i, imgB.cols, imgB.rows/diff));
            cv::Mat out(retVal, cv::Rect(0, (retVal.rows/diff)*i, retVal.cols, retVal.rows/diff));
            bitwise_and(inA, inB, out);
        }
    }
};

class Parallel_process_displayIrisRegion : public cv::ParallelLoopBody
{
private:
    const Mat &img;
    Mat& retVal;
    Mat element;
    int diff;

public:
    Parallel_process_displayIrisRegion(const cv::Mat &inputImgage, cv::Mat& outImage, int diffVal)
        : img(inputImgage), retVal(outImage), diff(diffVal) {}

    virtual void operator()(const cv::Range& range) const
    {
        //#pragma omp parallel for
        for(int i = range.start; i < range.end; ++i)
        {
            /* divide image in 'diff' number of parts and process simultaneously */
            cv::Mat inA(img, cv::Rect(0, (img.rows/diff)*i, img.cols, img.rows/diff));
            cv::Mat out(retVal, cv::Rect(0, (retVal.rows/diff)*i, retVal.cols, retVal.rows/diff));

            /* bug fix: the original loops tested and incremented the strip
               index 'i' and the row index 'j' instead of the row/column
               indices 'j'/'k', so they never scanned the strip correctly */
            for(int j = 0; j < inA.rows; ++j){
                for(int k = 0; k < inA.cols; ++k){
                    if(inA.at<uchar>(j, k) == 255){
                        out.at<Vec3b>(j, k) = Vec3b(255, 255, 255);
                    }
                }
            }
        }
    }
};

class Parallel_process_inv : public cv::ParallelLoopBody
{
private:
    const Mat &img;
    Mat& retVal;
    int diff;

public:
    Parallel_process_inv(const cv::Mat &inputImgage, cv::Mat& outImage, int diffVal)
        : img(inputImgage), retVal(outImage), diff(diffVal) {}

    virtual void operator()(const cv::Range& range) const
    {
        //#pragma omp parallel for
        for (int i = range.start; i < range.end; ++i)
        {
            /* divide image in 'diff' number of parts and process simultaneously */
            cv::Mat in(img, cv::Rect(0, (img.rows / diff)*i, img.cols, img.rows / diff));
            cv::Mat out(retVal, cv::Rect(0, (retVal.rows / diff)*i, retVal.cols, retVal.rows / diff));
            out = Scalar::all(255) - in;
        }
    }
};

class Parallel_process_sub : public cv::ParallelLoopBody
{
private:
    const Mat &imgA;
    const Mat &imgB;
    Mat& retVal;
    int diff;

public:
    Parallel_process_sub(const cv::Mat &inputImgageA, const cv::Mat &inputImgageB, cv::Mat& outImage, int diffVal)
        : imgA(inputImgageA), imgB(inputImgageB), retVal(outImage), diff(diffVal) {}

    virtual void operator()(const cv::Range& range) const
    {
        //#pragma omp parallel for
        for (int i = range.start; i < range.end; ++i)
        {
            /* divide image in 'diff' number of parts and process simultaneously */
            cv::Mat inA(imgA, cv::Rect(0, (imgA.rows / diff)*i, imgA.cols, imgA.rows / diff));
            cv::Mat inB(imgB, cv::Rect(0, (imgB.rows / diff)*i, imgB.cols, imgB.rows / diff));
            cv::Mat out(retVal, cv::Rect(0, (retVal.rows / diff)*i, retVal.cols, retVal.rows / diff));
            cv::subtract(inA, inB, out);
            //out = inA - inB;
        }
    }
};
class Parallel_process_hist : public cv::ParallelLoopBody
{
private:
    const Mat &img;
    int diff;
    int *hist;
    std::mutex &mtx;
    int &total_hist_sum;

public:
    Parallel_process_hist(const cv::Mat &inputImgage, int *hist, std::mutex &mtx, int &total_hist_sum, int diffVal)
        : img(inputImgage), diff(diffVal), hist(hist), mtx(mtx), total_hist_sum(total_hist_sum) {}

    virtual void operator()(const cv::Range& range) const
    {
        for (int i = range.start; i < range.end; ++i)
        {
            /* divide image in 'diff' number of parts and process simultaneously */
            cv::Mat in(img, cv::Rect(0, (img.rows / diff)*i, img.cols, img.rows / diff));

            int *local_hist = new int[256]();
            double local_hist_sum = 0;

            for (int r = 0; r < in.rows; ++r) {
                for (int c = 0; c < in.cols; ++c) {
                    int temp = in.at<uchar>(r, c);
                    local_hist[temp]++;
                }
            }
            for (int j = 0; j < 256; ++j) {
                local_hist_sum += j*local_hist[j];
            }

            mtx.lock();
            for (int k = 0; k < 256; ++k) {
                hist[k] += local_hist[k];
            }
            total_hist_sum += local_hist_sum;
            mtx.unlock();

            delete[] local_hist; // leak fix: the per-strip histogram was never freed
        }
    }
};

class Parallel_process_hist_pure : public cv::ParallelLoopBody
{
private:
    const Mat &img;
    int diff;
    int *hist;
    std::mutex &mtx;

public:
    Parallel_process_hist_pure(const cv::Mat &inputImgage, int *hist, std::mutex &mtx, int diffVal)
        : img(inputImgage), diff(diffVal), hist(hist), mtx(mtx) {}

    virtual void operator()(const cv::Range& range) const
    {
        for (int i = range.start; i < range.end; ++i)
        {
            /* divide image in 'diff' number of parts and process simultaneously */
            cv::Mat in(img, cv::Rect(0, (img.rows / diff)*i, img.cols, img.rows / diff));

            int *local_hist = new int[256]();

            for (int r = 0; r < in.rows; ++r) {
                for (int c = 0; c < in.cols; ++c) {
                    int temp = in.at<uchar>(r, c);
                    local_hist[temp]++;
                }
            }

            mtx.lock();
            for (int k = 0; k < 256; ++k) {
                hist[k] += local_hist[k];
            }
            mtx.unlock();

            delete[] local_hist; // leak fix
        }
    }
};

class Parallel_process_hist_and_cumulative_pure : public cv::ParallelLoopBody
{
private:
    const Mat &img;
    int diff;
    int *cu_hist;
    std::mutex &mtx;

public:
    Parallel_process_hist_and_cumulative_pure(const cv::Mat &inputImgage, int *cu_hist, std::mutex &mtx, int diffVal)
        : img(inputImgage), diff(diffVal), cu_hist(cu_hist), mtx(mtx) {}

    virtual void operator()(const cv::Range& range) const
    {
        for (int i = range.start; i < range.end; ++i)
        {
            /* divide image in 'diff' number of parts and process simultaneously */
            cv::Mat in(img, cv::Rect(0, (img.rows / diff)*i, img.cols, img.rows / diff));

            int *local_hist = new int[256]();
            int *local_cu_hist = new int[256]();
            double curr = 0;

            for (int r = 0; r < in.rows; ++r) {
                for (int c = 0; c < in.cols; ++c) {
                    int temp = in.at<uchar>(r, c);
                    local_hist[temp]++;
                }
            }
            for (int k = 0; k < 256; ++k) {
                curr += local_hist[k];
                local_cu_hist[k] += curr;
            }

            //double time_start = getTickCount();
            mtx.lock();
            for (int k = 0; k < 256; ++k) {
                cu_hist[k] += local_cu_hist[k];
            }
            mtx.unlock();
            //double time_end = getTickCount();
            //std::cout << "serial_time = " << (time_end - time_start) / getTickFrequency() << std::endl;
            //waitKey(0);

            delete[] local_hist;    // leak fix
            delete[] local_cu_hist; // leak fix
        }
    }
};

class Parallel_process_hist_equalization : public cv::ParallelLoopBody
{
private:
    const Mat &img;
    Mat& retVal;
    int diff;
    int *cu_hist;
    double total_size;
    std::mutex &mtx;

public:
    Parallel_process_hist_equalization(const cv::Mat &inputImgage, cv::Mat &outputImgage, int *cu_hist,
                                       double total_size, std::mutex &mtx, int diffVal)
        : img(inputImgage), retVal(outputImgage), diff(diffVal),
          cu_hist(cu_hist), total_size(total_size), mtx(mtx) {}

    virtual void operator()(const cv::Range& range) const
    {
        for (int i = range.start; i < range.end; ++i)
        {
            /* divide image in 'diff' number of parts and process simultaneously */
            cv::Mat in(img, cv::Rect(0, (img.rows / diff)*i, img.cols, img.rows / diff));
            cv::Mat out(retVal, cv::Rect(0, (retVal.rows / diff)*i, retVal.cols, retVal.rows / diff));

            //std::cout << "total_size = " << total_size << std::endl;
            //for (int r = 0; r < out.rows; ++r) {
            //    for (int c = 0; c < out.cols; ++c) {
            //        out.at<uchar>(r, c) = round((double)cu_hist[in.at<uchar>(r, c)] * 255 / total_size);
            //    }
            //}
            cv::Mat_<uchar>::iterator it = in.begin<uchar>();
            cv::Mat_<uchar>::const_iterator itend = in.end<uchar>();
            cv::Mat_<uchar>::iterator itout = out.begin<uchar>();
            for (; it != itend; ++it, ++itout) {
                *itout = (((double)cu_hist[*it]) * 255) / total_size;
            }
        }
    }
};

class Parallel_process_cal_thresh : public cv::ParallelLoopBody
{
private:
    int diff;
    int *hist;
    std::mutex &mtx;
    int total_hist_sum;
    int *threshold_cand;
    double *var_cand;

public:
    Parallel_process_cal_thresh(int *hist, std::mutex &mtx, int total_hist_sum,
                                int *threshold_cand, double *var_cand, int diffVal)
        : diff(diffVal), hist(hist), mtx(mtx), total_hist_sum(total_hist_sum),
          threshold_cand(threshold_cand), var_cand(var_cand) {}

    virtual void operator()(const cv::Range& range) const
    {
        for (int i = range.start; i < range.end; ++i)
        {
            /* divide 256 in 'diff' number of parts and process simultaneously */
            double var_max = -FLT_MAX;
            int threshold = -1;

            int chunk_size = 256 / diff;
            int start_index = i*chunk_size;
            int end_index = start_index + chunk_size;
            if (i == diff - 1) {
                /* bug fix: the original grew chunk_size when 'i == range.end',
                   a condition the loop bound makes unreachable (and one that
                   would also have shifted start_index); the remainder bins
                   belong to the last chunk */
                end_index += 256 % diff;
            }

            for (int j = start_index; j < end_index; ++j)
            {
                int wB = 0;
                int wF = 0;
                double sumB = 0;
                double mB = 0;
                double mF = 0;
                double var_between = 0;

                for (int t = 0; t <= j; ++t) {
                    wB += hist[t];       // Weight Background
                    sumB += t * hist[t];
                }
                if (wB == 0) continue;

                wF = 640 * 480 - wB;     // Weight Foreground (assumes 640x480 frames)
                if (wF == 0) break;

                mB = sumB / (double)wB;                    // Mean Background
                mF = (total_hist_sum - sumB) / (double)wF; // Mean Foreground

                var_between = (double)wB * (double)wF * (mB - mF) * (mB - mF); // Between Class Variance

                // Check if new maximum found
                if (var_between > var_max) {
                    var_max = var_between;
                    threshold = j;
                }
            }

            var_cand[i] = var_max;
            threshold_cand[i] = threshold;
        }
    }
};

class Parallel_process_apply_threshold : public cv::ParallelLoopBody
{
private:
    Mat img;
    Mat& retVal;
    int diff;
    int threshold_value;
    int type;

public:
    Parallel_process_apply_threshold(cv::Mat inputImgage, cv::Mat& outImage, int threshold_value, int type, int diffVal)
        : img(inputImgage), retVal(outImage), diff(diffVal), threshold_value(threshold_value), type(type) {}

    virtual void operator()(const cv::Range& range) const
    {
        //#pragma omp parallel for
        for (int i = range.start; i < range.end; ++i)
        {
            /* divide image in 'diff' number of parts and process simultaneously */
            cv::Mat in(img, cv::Rect(0, (img.rows / diff)*i, img.cols, img.rows / diff));
            cv::Mat out(retVal, cv::Rect(0, (retVal.rows / diff)*i, retVal.cols, retVal.rows / diff));
            threshold(in, out, threshold_value, 255, type);
        }
    }
};

class Parallel_process_find_min_max_arr : public cv::ParallelLoopBody
{
private:
    const cv::Mat &img;
    int diff;
    double *min_val_cand;
    double *max_val_cand;

public:
    Parallel_process_find_min_max_arr(const cv::Mat &inputImgage, double *min_val_cand, double *max_val_cand, int diffVal)
        : img(inputImgage), diff(diffVal), min_val_cand(min_val_cand), max_val_cand(max_val_cand) {}

    virtual void operator()(const cv::Range& range) const
    {
        //#pragma omp parallel for
        for (int i = range.start; i < range.end; ++i)
        {
            /* divide image in 'diff' number of parts and process simultaneously */
            cv::Mat in(img, cv::Rect(0, (img.rows / diff)*i, img.cols, img.rows / diff));

            double min_val;
            double max_val;
            minMaxLoc(in, &min_val, &max_val, NULL, NULL);
            min_val_cand[i] = min_val;
            max_val_cand[i] = max_val;
        }
    }
};

class Parallel_process3_find_min_max_arr : public cv::ParallelLoopBody
{
private:
    const cv::Mat &img1;
    const cv::Mat &img2;
    const cv::Mat &img3;
    int diff;
    double *min_val_cand1;
    double *max_val_cand1;
    double *min_val_cand2;
    double *max_val_cand2;
    double *min_val_cand3;
    double *max_val_cand3;

public:
    Parallel_process3_find_min_max_arr(const cv::Mat &inputImgage1, const cv::Mat &inputImgage2, const cv::Mat &inputImgage3,
                                       double *min_val_cand1, double *max_val_cand1,
                                       double *min_val_cand2, double *max_val_cand2,
                                       double *min_val_cand3, double *max_val_cand3, int diffVal)
        : img1(inputImgage1), img2(inputImgage2), img3(inputImgage3), diff(diffVal),
          min_val_cand1(min_val_cand1), max_val_cand1(max_val_cand1),
          min_val_cand2(min_val_cand2), max_val_cand2(max_val_cand2),
          min_val_cand3(min_val_cand3), max_val_cand3(max_val_cand3) {}

    virtual void operator()(const cv::Range& range) const
    {
        //#pragma omp parallel for
        for (int i = range.start; i < range.end; ++i)
        {
            /* divide image in 'diff' number of parts and process simultaneously */
            double min_val;
            double max_val;
            cv::Mat in1(img1, cv::Rect(0, (img1.rows / diff)*i, img1.cols, img1.rows / diff));
            cv::Mat in2(img2, cv::Rect(0, (img2.rows / diff)*i, img2.cols, img2.rows / diff));
            cv::Mat in3(img3, cv::Rect(0, (img3.rows / diff)*i, img3.cols, img3.rows / diff));

            minMaxLoc(in1, &min_val, &max_val, NULL, NULL);
            min_val_cand1[i] = min_val;
            max_val_cand1[i] = max_val;

            minMaxLoc(in2, &min_val, &max_val, NULL, NULL);
            min_val_cand2[i] = min_val;
            max_val_cand2[i] = max_val;

            minMaxLoc(in3, &min_val, &max_val, NULL, NULL);
            min_val_cand3[i] = min_val;
            max_val_cand3[i] = max_val;
        }
    }
};

class Parallel_process_moment : public cv::ParallelLoopBody
{
private:
    const cv::Mat &img;
    int diff;
    double *m10_array;
    double *m01_array;
    double *m00_array;
    int *start_loc;
    int *end_loc;

public:
    Parallel_process_moment(const cv::Mat &inputImgage, double *m10_array, double *m01_array, double *m00_array,
                            int *start_loc, int *end_loc, int diffVal)
        : img(inputImgage), diff(diffVal), start_loc(start_loc), end_loc(end_loc),
          m10_array(m10_array), m01_array(m01_array), m00_array(m00_array) {}

    virtual void operator()(const cv::Range& range) const
    {
        //#pragma omp parallel for
        for (int i = range.start; i < range.end; ++i)
        {
            /* divide image in 'diff' number of parts and process simultaneously */
            double local_dM01 = 0;
            double local_dM10 = 0;
            double local_dM00 = 0;
            double work_load = img.rows / diff;
            cv::Mat in(img, cv::Rect(0, (work_load)*i, img.cols, work_load));

            for (int r = start_loc[i]; r <= end_loc[i]; ++r) {
                for (int c = 0; c < in.cols; ++c) {
                    local_dM01 += (double)(in.at<uchar>(r - i*work_load, c))*r;
                }
            }
            for (int r = start_loc[i]; r <= end_loc[i]; ++r) {
                for (int c = 0; c < in.cols; ++c) {
                    local_dM10 += (double)(in.at<uchar>(r - i*work_load, c))*c;
                }
            }
            for (int r = start_loc[i]; r <= end_loc[i]; ++r) {
                for (int c = 0; c < in.cols; ++c) {
                    local_dM00 += (double)(in.at<uchar>(r - i*work_load, c));
                }
            }

            //for (int r = i*work_load; r<work_load*(i + 1); ++r) {
            //    for (int c = 0; c<in.cols; ++c) {
            //        local_dM01 += (double)(in.at<uchar>(r - i*work_load, c))*r;
            //    }
            //}
            //for (int r = i*work_load; r<work_load*(i + 1); ++r) {
            //    for (int c = 0; c<in.cols; ++c) {
            //        local_dM10 += (double)(in.at<uchar>(r - i*work_load, c))*c;
            //    }
            //}
            //for (int r = i*work_load; r<work_load*(i + 1); ++r) {
            //    for (int c = 0; c<in.cols; ++c) {
            //        local_dM00 += (double)(in.at<uchar>(r - i*work_load, c));
            //    }
            //}

            m01_array[i] = local_dM01;
            m10_array[i] = local_dM10;
            m00_array[i] = local_dM00;
        }
    }
};

class Parallel_process_moment_vector : public cv::ParallelLoopBody
{
private:
    const std::vector<Point> &img; // std:: added: 'using namespace cv' alone does not provide vector
    int diff;
    int total_size;
    double *m10_array;
    double *m01_array;
    double *m00_array;
    int *start_loc;
    int *end_loc;

public:
    Parallel_process_moment_vector(const std::vector<Point> &inputImgage, double *m10_array, double *m01_array,
                                   double *m00_array, int *start_loc, int *end_loc, int total_size, int diffVal)
        : img(inputImgage), diff(diffVal), total_size(total_size), start_loc(start_loc), end_loc(end_loc),
          m10_array(m10_array), m01_array(m01_array), m00_array(m00_array) {}

    virtual void operator()(const cv::Range& range) const
    {
        //#pragma omp parallel for
        for (int i = range.start; i < range.end; ++i)
        {
            /* divide image in 'diff' number of parts and process simultaneously */
            double local_dM01 = 0;
            double local_dM10 = 0;
            double local_dM00 = 0;

            for (int r = start_loc[i]; r <= end_loc[i]; ++r) {
                int prev_idx = (r == range.start) ? img.size() - 1 : r - 1;
                double dxy = img[prev_idx].x * img[r].y - img[r].x * img[prev_idx].y;
                local_dM01 += dxy*(img[r].y + img[prev_idx].y);
                local_dM10 += dxy*(img[r].x + img[prev_idx].x);
                local_dM00 += dxy;
            }

            m01_array[i] = local_dM01;
            m10_array[i] = local_dM10;
            m00_array[i] = local_dM00;
        }
    }
};

inline double remap(uchar &v, const double &min, const double &max)
{
    return (v - min) / (double)(max - min);
}

class Parallel_process3_remap : public cv::ParallelLoopBody
{
private:
    const cv::Mat3b &img;
    cv::Mat3b &retVal;
    int MIN_b, MIN_g, MIN_r;
    int MAX_b, MAX_g, MAX_r;
    int diff;

public:
    Parallel_process3_remap(const cv::Mat3b &inputImgage, cv::Mat3b &retVal,
                            int MIN_b, int MAX_b, int MIN_g, int MAX_g, int MIN_r, int MAX_r, int diffVal)
        : img(inputImgage), retVal(retVal), diff(diffVal),
          MIN_b(MIN_b), MAX_b(MAX_b), MIN_g(MIN_g), MAX_g(MAX_g), MIN_r(MIN_r), MAX_r(MAX_r) {}

    virtual void operator()(const cv::Range& range) const
    {
        //#pragma omp parallel for
        for (int i = range.start; i < range.end; ++i)
        {
            /* divide image in 'diff' number of parts and process simultaneously */
            cv::Mat3b in(img, cv::Rect(0, (img.rows / diff)*i, img.cols, img.rows / diff));
            cv::Mat3b out(retVal, cv::Rect(0, (retVal.rows / diff)*i, retVal.cols, retVal.rows / diff));

            cv::Mat_<cv::Vec3b>::iterator it = in.begin();
            cv::Mat_<cv::Vec3b>::const_iterator itend = in.end();
            cv::Mat_<cv::Vec3b>::iterator itout = out.begin();
            for (; it != itend; ++it, ++itout) {
                double R_new;
                double G_new;
                double B_new;
                R_new = remap((*it).val[2], MIN_r, MAX_r);
                G_new = remap((*it).val[1], MIN_g, MAX_g);
                B_new = remap((*it).val[0], MIN_b, MAX_b);
                cv::Vec3b vout;
                vout.val[0] = B_new * 255;
                vout.val[1] = G_new * 255;
                vout.val[2] = R_new * 255;
                *itout = vout;
            }
        }
    }
};

class Parallel_cvtColor : public cv::ParallelLoopBody
{
private:
    const cv::Mat3b &img;
    cv::Mat &retVal;
    int type;
    int diff;

public:
    Parallel_cvtColor(const cv::Mat3b &inputImgage, cv::Mat &retVal, int type, int diffVal)
        : img(inputImgage), retVal(retVal), diff(diffVal), type(type) {}

    virtual void operator()(const cv::Range& range) const
    {
        //#pragma omp parallel for
        for (int i = range.start; i < range.end; ++i)
        {
            /* divide image in 'diff' number of parts and process simultaneously */
            cv::Mat3b in(img, cv::Rect(0, (img.rows / diff)*i, img.cols, img.rows / diff));
            cv::Mat out(retVal, cv::Rect(0, (retVal.rows / diff)*i, retVal.cols, retVal.rows / diff));
            // bug fix: honour the conversion code passed to the constructor; the
            // original hardcoded COLOR_BGRA2GRAY, which asserts on the 3-channel
            // Mat3b input this class takes
            cvtColor(in, out, type);
        }
    }
};

class Parallel_cvtColor_my_ver : public cv::ParallelLoopBody
{
private:
    const cv::Mat3b &img;
    cv::Mat &retVal;
    int diff;

public:
    Parallel_cvtColor_my_ver(const cv::Mat3b &inputImgage, cv::Mat &retVal, int diffVal)
        : img(inputImgage), retVal(retVal), diff(diffVal) {}

    virtual void operator()(const cv::Range& range) const
    {
        for (int i = range.start; i < range.end; ++i)
        {
            /* divide image in 'diff' number of parts and process simultaneously */
            cv::Mat3b in(img, cv::Rect(0, (img.rows / diff)*i, img.cols, img.rows / diff));
            cv::Mat out(retVal, cv::Rect(0, (retVal.rows / diff)*i, retVal.cols, retVal.rows / diff));

            //cvtColor(in, out, COLOR_BGRA2GRAY);
            cv::Mat_<cv::Vec3b>::iterator it = in.begin();
            cv::Mat_<cv::Vec3b>::const_iterator itend = in.end();
            cv::Mat_<uchar>::iterator itout = out.begin<uchar>();
            for (; it != itend; ++it, ++itout) {
                *itout = 0.299*(*it).val[2] + 0.587*(*it).val[1] + 0.114*(*it).val[0];
            }
        }
    }
};

inline void ParallelOtsu(const cv::Mat &inputImgage, cv::Mat& outImage, int type, int thread_num)
{
    int *threshold_cand = new int[thread_num]();
    double *var_cand = new double[thread_num]();
    int *hist = new int[256]();
    int threshold_otsu = 0;
    double var_final = -1;
    int total_hist_sum = 0;
    std::mutex mtx;

    cv::parallel_for_(cv::Range(0, thread_num),
                      Parallel_process_hist(inputImgage, hist, mtx, total_hist_sum, thread_num));
    cv::parallel_for_(cv::Range(0, thread_num),
                      Parallel_process_cal_thresh(hist, mtx, total_hist_sum, threshold_cand, var_cand, thread_num));

    for (int i = 0; i < thread_num; ++i) {
        if (var_final <= var_cand[i]) {
            var_final = var_cand[i];
            threshold_otsu = threshold_cand[i];
        }
    }

    cv::parallel_for_(cv::Range(0, thread_num),
                      Parallel_process_apply_threshold(inputImgage, outImage, threshold_otsu, type, thread_num));

    delete [] var_cand;
    delete [] threshold_cand;
    delete [] hist; // leak fix: 'hist' was never freed in the original
}

inline void ParallelHistEqual(const cv::Mat &inputImgage, cv::Mat& outImage, int thread_num)
{
    int *cu_hist = new int[256]();
    std::mutex mtx;

    cv::parallel_for_(cv::Range(0, thread_num),
                      Parallel_process_hist_and_cumulative_pure(inputImgage, cu_hist, mtx, thread_num));
    cv::parallel_for_(cv::Range(0, thread_num),
                      Parallel_process_hist_equalization(inputImgage, outImage, cu_hist,
                                                         inputImgage.rows*inputImgage.cols, mtx, thread_num));

    delete [] cu_hist;
}

void SerialHist(cv::Mat inputImgage, cv::Mat& outImage, int *hist)
{
    for (int r = 0; r < inputImgage.rows; ++r) {
        for (int c = 0; c < inputImgage.cols; ++c) {
            int temp = inputImgage.at<uchar>(r, c);
            hist[temp]++;
        }
    }
}

void SerialOtsu(cv::Mat inputImgage, int *hist, int total_hist_sum, int &threshold)
{
    int wB = 0;
    int wF = 0;
    int total = 640 * 480;
    double sumB = 0;
    double varMax = -FLT_MAX;

    for (int t = 0; t < 256; t++) {
        wB += hist[t];       // Weight Background
        if (wB == 0) continue;

        wF = total - wB;     // Weight Foreground
        if (wF == 0) break;

        sumB += (float)(t * hist[t]);

        double mB = sumB / (double)wB;                    // Mean Background
        double mF = (total_hist_sum - sumB) / (double)wF; // Mean Foreground

        // Calculate Between Class Variance
        double varBetween = (double)wB * (double)wF * (mB - mF) * (mB - mF);

        // Check if new maximum found
        if (varBetween > varMax) {
            varMax = varBetween;
            threshold = t;
        }
    }
}

class Parallel_process_converto_min_max : public cv::ParallelLoopBody
{
private:
    const Mat &img;
    Mat& retVal;
    int diff;
    double min_val_src;
    double max_val_src;
    double min_to_val;
    double max_to_val;

public:
    Parallel_process_converto_min_max(const cv::Mat &inputImgage, cv::Mat &outputImgage,
                                      double min_val_src, double max_val_src,
                                      double min_to_val, double max_to_val, int diffVal)
        : img(inputImgage), retVal(outputImgage), diff(diffVal),
          min_val_src(min_val_src), max_val_src(max_val_src),
          min_to_val(min_to_val), max_to_val(max_to_val) {}

    virtual void operator()(const cv::Range& range) const
    {
        for (int i = range.start; i < range.end; ++i)
        {
            /* divide image in 'diff' number of parts and process simultaneously */
            cv::Mat in(img, cv::Rect(0, (img.rows / diff)*i, img.cols, img.rows / diff));
            cv::Mat out(retVal, cv::Rect(0, (retVal.rows / diff)*i, retVal.cols, retVal.rows / diff));

            cv::Mat_<uchar>::iterator it = in.begin<uchar>();
            cv::Mat_<uchar>::const_iterator itend = in.end<uchar>();
            cv::Mat_<uchar>::iterator itout = out.begin<uchar>();
            for (; it != itend; ++it, ++itout) {
                *itout = ((double)((*it) - min_val_src)*(max_to_val - min_to_val) / (max_val_src - min_val_src)) + min_to_val;
            }
        }
    }
};

class Parallel_process_calculate_iris_rate : public cv::ParallelLoopBody
{
private:
    const Mat &model_mask;
    const Mat &model_intensity;
    int *pixel_in_iris;
    int *pixel_in_others;
    int diff;

public:
    Parallel_process_calculate_iris_rate(const cv::Mat &model_mask, const cv::Mat &model_intensity,
                                         int *pixel_in_iris, int *pixel_in_others, int diffVal)
        : model_mask(model_mask), model_intensity(model_intensity), diff(diffVal),
          pixel_in_iris(pixel_in_iris), pixel_in_others(pixel_in_others) {}

    virtual void operator()(const cv::Range& range) const
    {
        for (int i = range.start; i < range.end; ++i)
        {
            /* divide image in 'diff' number of parts and process simultaneously */
            cv::Mat model_in(model_mask, cv::Rect(0, (model_mask.rows / diff)*i, model_mask.cols, model_mask.rows / diff));
            cv::Mat module_intensity_in(model_intensity, cv::Rect(0, (model_intensity.rows / diff)*i,
                                                                  model_intensity.cols, model_intensity.rows / diff));

            int loc_pixel_in_iris = 0;
            int loc_pixel_in_others = 0;

            cv::Mat_<uchar>::iterator it = model_in.begin<uchar>();
            cv::Mat_<uchar>::const_iterator itend = model_in.end<uchar>();
            cv::Mat_<uchar>::iterator it_model_intensity = module_intensity_in.begin<uchar>();
            for (; it != itend; ++it, ++it_model_intensity) {
                if (*it == 255) {
                    if (*it_model_intensity == 255) {
                        ++loc_pixel_in_iris;
                    } else {
                        ++loc_pixel_in_others;
                    }
                }
            }

            pixel_in_iris[i] = loc_pixel_in_iris;
            pixel_in_others[i] = loc_pixel_in_others;
        }
    }
};

inline void ParallelCalcIrisRate(const cv::Mat &model_mask, const cv::Mat &model_intensity, float &iris_rate,
                                 int &pixel_in_iris, int &pixel_in_others, const int thread_num)
{
    int *pixel_in_iris_arr = new int[thread_num]();
    int *pixel_in_others_arr = new int[thread_num]();
    int global_pixel_in_iris = 0;
    int global_pixel_in_others = 1; // starts at 1 (not 0), apparently to avoid a division by zero below

    cv::parallel_for_(cv::Range(0, thread_num),
                      Parallel_process_calculate_iris_rate(model_mask, model_intensity,
                                                           pixel_in_iris_arr, pixel_in_others_arr, thread_num));

    for (int i = 0; i < thread_num; ++i) {
        global_pixel_in_iris += pixel_in_iris_arr[i];
        global_pixel_in_others += pixel_in_others_arr[i];
    }

    iris_rate = global_pixel_in_iris / (float)global_pixel_in_others;
    //std::cout << std::endl << "parallel, global_pixel_in_iris = " << global_pixel_in_iris << std::endl;
    //std::cout << std::endl << "parallel, global_pixel_in_others = " << global_pixel_in_others << std::endl;
    //std::cout << std::endl << "parallel, iris_rate = " << iris_rate << std::endl;

    pixel_in_iris = global_pixel_in_iris;
    pixel_in_others = global_pixel_in_others;

    delete [] pixel_in_iris_arr;
    delete [] pixel_in_others_arr;
}

#endif // TBB_PARALLEL_SPEEDUP_H
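A minimal usage sketch of the strip-splitting pattern above (assumptions: the file name "eye.png" is hypothetical, the input is 8-bit grayscale, and its height is divisible by the chosen thread count, since the strips are cut with integer division):

#include "TBB_Parallel_SpeedUp.h"

int main()
{
    const int threads = 4; // each body handles rows/threads-high strips
    cv::Mat src = cv::imread("eye.png", cv::IMREAD_GRAYSCALE);
    cv::Mat blurred(src.size(), src.type());
    cv::Mat binary(src.size(), src.type());

    // cv::parallel_for_ hands each worker a sub-range of [0, threads);
    // the bodies slice the image into horizontal strips themselves.
    cv::parallel_for_(cv::Range(0, threads),
                      Parallel_process_gau(src, blurred, 5, threads));
    ParallelOtsu(blurred, binary, cv::THRESH_BINARY, threads);

    cv::imwrite("binary.png", binary);
    return 0;
}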
has-include-1.c
/* { dg-do compile } */

void
foo (void)
{
#pragma omp parallel if (__has_include ("<stdlib.h>")) /* { dg-error "used outside of preprocessing directive" } */
  ;
}
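For contrast, a sketch of the usage the diagnostic allows: __has_include evaluated inside a preprocessing directive rather than in a runtime expression (the outer defined() check is just defensive, for toolchains that predate __has_include):

#if defined(__has_include)
# if __has_include(<stdlib.h>)
#  include <stdlib.h>
# endif
#endif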
linear_system_solver.c
//
// Created by sachetto on 04/10/17.
//

#include "../config/linear_system_solver_config.h"
#include "../config_helpers/config_helpers.h"
#include "../libraries_common/common_data_structures.h"
#include "../single_file_libraries/stb_ds.h"
#include "../models_library/model_gpu_utils.h"

bool jacobi_initialized = false;
bool bcg_initialized = false;

static bool use_preconditioner = false;
static int max_its = 50;
static real_cpu tol = 1e-16;

#ifdef COMPILE_CUDA

// moved inside the COMPILE_CUDA guard so non-CUDA builds do not require the
// CUDA headers (the original included them unconditionally)
#include <cusparse_v2.h>
#include <cublas_v2.h>

static int *d_col, *d_row;
static real *d_val, *d_x;
static real *d_r, *d_p, *d_Ax;
static int N = 0, nz = 0;

/* Get handle to the CUBLAS context */
static cublasHandle_t cublasHandle = 0;
static cublasStatus_t cublasStatus;

/* Get handle to the CUSPARSE context */
static cusparseHandle_t cusparseHandle = 0;
static cusparseStatus_t cusparseStatus;
cusparseMatDescr_t descr = 0;

static int nzILU0;
static real *d_valsILU0, *d_zm1, *d_zm2, *d_rm2, *d_y;
static cusparseSolveAnalysisInfo_t infoA = 0;
static cusparseSolveAnalysisInfo_t info_u;
static cusparseMatDescr_t descrL = 0;
static cusparseMatDescr_t descrU = 0;

INIT_LINEAR_SYSTEM(init_gpu_conjugate_gradient) {

    int_array I = NULL, J = NULL;
    f32_array val = NULL;

    GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real_cpu, tol, config->config_data, "tolerance");
    GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(int, max_its, config->config_data, "max_iterations");
    GET_PARAMETER_BOOLEAN_VALUE_OR_USE_DEFAULT(use_preconditioner, config->config_data, "use_preconditioner");

    check_cuda_error((cudaError_t)cublasCreate(&cublasHandle));
    check_cuda_error((cudaError_t)cusparseCreate(&cusparseHandle));
    check_cuda_error((cudaError_t)cusparseCreateMatDescr(&descr));

    cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
    cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);

    grid_to_csr(the_grid, &val, &I, &J);

    uint32_t num_active_cells = the_grid->num_active_cells;
    struct cell_node** ac = the_grid->active_cells;

    nz = arrlen(val);
    N = num_active_cells;

    check_cuda_error(cudaMalloc((void **) &d_col, nz * sizeof(int)));
    check_cuda_error(cudaMalloc((void **) &d_row, (N + 1) * sizeof(int)));
    check_cuda_error(cudaMalloc((void **) &d_val, nz * sizeof(float)));
    check_cuda_error(cudaMalloc((void **) &d_x, N * sizeof(float)));
    check_cuda_error(cudaMalloc((void **) &d_r, N * sizeof(float)));
    check_cuda_error(cudaMalloc((void **) &d_p, N * sizeof(float)));
    check_cuda_error(cudaMalloc((void **) &d_Ax, N * sizeof(float)));

    cudaMemcpy(d_col, J, nz * sizeof(int), cudaMemcpyHostToDevice);      //JA
    cudaMemcpy(d_row, I, (N + 1) * sizeof(int), cudaMemcpyHostToDevice); //IA
    cudaMemcpy(d_val, val, nz * sizeof(float), cudaMemcpyHostToDevice);  //A

    real *rhs = (real*) malloc(sizeof(real)*num_active_cells);

    #pragma omp parallel for
    for (uint32_t i = 0; i < num_active_cells; i++) {
        rhs[i] = ac[i]->b;
    }

    check_cuda_error(cudaMemcpy(d_x, rhs, N * sizeof(float), cudaMemcpyHostToDevice)); //Result

    if(use_preconditioner) {
        nzILU0 = 2*N-1;
        check_cuda_error(cudaMalloc((void **)&d_valsILU0, nz*sizeof(float)));
        check_cuda_error(cudaMalloc((void **)&d_zm1, (N)*sizeof(float)));
        check_cuda_error(cudaMalloc((void **)&d_zm2, (N)*sizeof(float)));
        check_cuda_error(cudaMalloc((void **)&d_rm2, (N)*sizeof(float)));
        check_cuda_error(cudaMalloc((void **)&d_y, N*sizeof(float)));

        cusparseStatus = cusparseCreateSolveAnalysisInfo(&infoA);
        check_cuda_error((cudaError_t)cusparseStatus);

        /* Perform the analysis for the Non-Transpose case */
        cusparseStatus = cusparseScsrsv_analysis(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                                                 N, nz, descr, d_val, d_row, d_col, infoA);
        check_cuda_error((cudaError_t)cusparseStatus);

        /* Copy A data to ILU0 vals as input*/
        cudaMemcpy(d_valsILU0, d_val, nz*sizeof(float), cudaMemcpyDeviceToDevice);

        /* generate the Incomplete LU factor H for the matrix A using cudsparseScsrilu0 */
        cusparseStatus = cusparseScsrilu0(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                                          N, descr, d_valsILU0, d_row, d_col, infoA);
        check_cuda_error((cudaError_t)cusparseStatus);

        cusparseCreateSolveAnalysisInfo(&info_u);

        cusparseStatus = cusparseCreateMatDescr(&descrL);
        cusparseSetMatType(descrL, CUSPARSE_MATRIX_TYPE_GENERAL);
        cusparseSetMatIndexBase(descrL, CUSPARSE_INDEX_BASE_ZERO);
        cusparseSetMatFillMode(descrL, CUSPARSE_FILL_MODE_LOWER);
        cusparseSetMatDiagType(descrL, CUSPARSE_DIAG_TYPE_UNIT);

        cusparseStatus = cusparseCreateMatDescr(&descrU);
        cusparseSetMatType(descrU, CUSPARSE_MATRIX_TYPE_GENERAL);
        cusparseSetMatIndexBase(descrU, CUSPARSE_INDEX_BASE_ZERO);
        cusparseSetMatFillMode(descrU, CUSPARSE_FILL_MODE_UPPER);
        cusparseSetMatDiagType(descrU, CUSPARSE_DIAG_TYPE_NON_UNIT);

        cusparseStatus = cusparseScsrsv_analysis(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                                                 N, nz, descrU, d_val, d_row, d_col, info_u);
    }

    free(rhs);
    arrfree(I);
    arrfree(J);
    arrfree(val);
}

END_LINEAR_SYSTEM(end_gpu_conjugate_gradient) {

    check_cuda_error((cudaError_t)cusparseDestroy(cusparseHandle));
    check_cuda_error((cudaError_t)cublasDestroy(cublasHandle));
    check_cuda_error((cudaError_t)cusparseDestroyMatDescr(descr));
    check_cuda_error((cudaError_t)cusparseDestroyMatDescr(descrL));
    check_cuda_error((cudaError_t)cusparseDestroyMatDescr(descrU));

    /* Destroy parameters */
    cusparseDestroySolveAnalysisInfo(infoA);
    cusparseDestroySolveAnalysisInfo(info_u);

    check_cuda_error(cudaFree(d_col)); // leak fix: d_col was never freed in the original
    check_cuda_error(cudaFree(d_row));
    check_cuda_error(cudaFree(d_val));
    check_cuda_error(cudaFree(d_x));
    check_cuda_error(cudaFree(d_r));
    check_cuda_error(cudaFree(d_p));
    check_cuda_error(cudaFree(d_Ax));
    check_cuda_error(cudaFree(d_y));
    check_cuda_error(cudaFree(d_valsILU0));
    check_cuda_error(cudaFree(d_zm1));
    check_cuda_error(cudaFree(d_zm2));
    check_cuda_error(cudaFree(d_rm2));
}

SOLVE_LINEAR_SYSTEM(gpu_conjugate_gradient) {

    /* Conjugate gradient without preconditioning.
       ------------------------------------------
       Follows the description by Golub & Van Loan, "Matrix Computations 3rd ed.", Section 10.2.6 */

    real dot;
    real a, b, na, r0, r1;
    int k;
    real alpha, beta, alpham1;

    real *rhs; //Vector B

    uint32_t num_active_cells = the_grid->num_active_cells;
    struct cell_node** ac = the_grid->active_cells;

    rhs = (real*) malloc(sizeof(real)*num_active_cells);

    #pragma omp parallel for
    for (uint32_t i = 0; i < num_active_cells; i++) {
        rhs[i] = ac[i]->b;
    }

    cudaMemcpy(d_r, rhs, N * sizeof(float), cudaMemcpyHostToDevice); //B

    alpha = 1.0;
    alpham1 = -1.0;
    beta = 0.0;
    r0 = 0.;

    real numerator, denominator;

    cusparseScsrmv(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr,
                   d_val, d_row, d_col, d_x, &beta, d_Ax);

    cublasSaxpy(cublasHandle, N, &alpham1, d_Ax, 1, d_r, 1);
    cublasStatus = cublasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1);

    k = 1;

    while (r1 >= tol && k <= max_its) {

        if(use_preconditioner) {
            // Forward Solve, we can re-use infoA since the sparsity pattern of A matches that of L
            cusparseStatus = cusparseScsrsv_solve(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, &alpha,
                                                  descrL, d_valsILU0, d_row, d_col, infoA, d_r, d_y);
            check_cuda_error((cudaError_t)cusparseStatus);

            // Back Substitution
            cusparseStatus = cusparseScsrsv_solve(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, &alpha,
                                                  descrU, d_valsILU0, d_row, d_col, info_u, d_y, d_zm1);
            check_cuda_error((cudaError_t)cusparseStatus);
        }

        if (k > 1) {
            if(use_preconditioner) {
                cublasSdot(cublasHandle, N, d_r, 1, d_zm1, 1, &numerator);
                cublasSdot(cublasHandle, N, d_rm2, 1, d_zm2, 1, &denominator);
                b = numerator/denominator;
                cublasSscal(cublasHandle, N, &b, d_p, 1);
                cublasSaxpy(cublasHandle, N, &alpha, d_zm1, 1, d_p, 1);
            }
            else {
                b = r1 / r0;
                cublasStatus = cublasSscal(cublasHandle, N, &b, d_p, 1);
                cublasStatus = cublasSaxpy(cublasHandle, N, &alpha, d_r, 1, d_p, 1);
            }
        }
        else {
            if(use_preconditioner) {
                cublasScopy(cublasHandle, N, d_zm1, 1, d_p, 1);
            }
            else {
                cublasStatus = cublasScopy(cublasHandle, N, d_r, 1, d_p, 1);
            }
        }

        if(use_preconditioner) {
            cusparseScsrmv(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, nzILU0, &alpha, descrU,
                           d_val, d_row, d_col, d_p, &beta, d_Ax);
            cublasSdot(cublasHandle, N, d_r, 1, d_zm1, 1, &numerator);
            cublasSdot(cublasHandle, N, d_p, 1, d_Ax, 1, &denominator);
            a = numerator / denominator;

            cublasSaxpy(cublasHandle, N, &a, d_p, 1, d_x, 1);
            cublasScopy(cublasHandle, N, d_r, 1, d_rm2, 1);
            cublasScopy(cublasHandle, N, d_zm1, 1, d_zm2, 1);
            na = -a;
            cublasSaxpy(cublasHandle, N, &na, d_Ax, 1, d_r, 1);

            r0 = r1;
            cublasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1);
        }
        else {
            cusparseScsrmv(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr,
                           d_val, d_row, d_col, d_p, &beta, d_Ax);
            cublasStatus = cublasSdot(cublasHandle, N, d_p, 1, d_Ax, 1, &dot);
            a = r1 / dot;

            cublasStatus = cublasSaxpy(cublasHandle, N, &a, d_p, 1, d_x, 1);
            na = -a;
            cublasStatus = cublasSaxpy(cublasHandle, N, &na, d_Ax, 1, d_r, 1);

            r0 = r1;
            cublasStatus = cublasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1);
        }

        cudaDeviceSynchronize();
        k++;
    }

    cudaMemcpy(rhs, d_x, N*sizeof(real), cudaMemcpyDeviceToHost);

    *number_of_iterations = k-1;
    *error = r1;

    #pragma omp parallel for
    for (uint32_t i = 0; i < num_active_cells; i++) {
        ac[i]->v = rhs[i];
    }

    free(rhs);
}

#endif

INIT_LINEAR_SYSTEM(init_cpu_conjugate_gradient) {
    GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real_cpu, tol, config->config_data, "tolerance");
    GET_PARAMETER_BOOLEAN_VALUE_OR_USE_DEFAULT(use_preconditioner, config->config_data, "use_preconditioner");
"use_preconditioner"); GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(int, max_its, config->config_data, "max_iterations"); } END_LINEAR_SYSTEM(end_cpu_conjugate_gradient) { } SOLVE_LINEAR_SYSTEM(cpu_conjugate_gradient) { real_cpu rTr, r1Tr1, pTAp, alpha, beta, precision = tol, rTz, r1Tz1; uint32_t num_active_cells = the_grid->num_active_cells; struct cell_node** ac = the_grid->active_cells; *error = 1.0; *number_of_iterations = 1; //__________________________________________________________________________ //Computes int_vector A*x, residue r = b - Ax, scalar rTr = r^T * r and //sets initial search direction p. rTr = 0.0; rTz = 0.0; struct element element; uint32_t i; #pragma omp parallel for private (element) reduction(+:rTr,rTz) for (i = 0; i < num_active_cells; i++) { if(CG_INFO(ac[i]) == NULL) { INITIALIZE_CONJUGATE_GRADIENT_INFO(ac[i]); } struct element *cell_elements = ac[i]->elements; ac[i]->Ax = 0.0; size_t max_el = arrlen(cell_elements); for(int el = 0; el < max_el; el++) { element = cell_elements[el]; ac[i]->Ax += element.value * element.cell->v; } CG_R(ac[i]) = ac[i]->b - ac[i]->Ax; if(use_preconditioner) { real_cpu value = cell_elements[0].value; if(value == 0.0) value = 1.0; CG_Z(ac[i]) = (1.0/value) * CG_R(ac[i]); // preconditioner rTz += CG_R(ac[i]) * CG_Z(ac[i]); CG_P(ac[i]) = CG_Z(ac[i]); } else { CG_P(ac[i]) = CG_R(ac[i]); } real_cpu r = CG_R(ac[i]); rTr += r * r; } *error = rTr; //__________________________________________________________________________ //Conjugate gradient iterations. if( *error >= precision ) { while( *number_of_iterations < max_its ) { //__________________________________________________________________ // Computes Ap and pTAp. Uses Ax to store Ap. pTAp = 0.0; #pragma omp parallel for private(element) reduction(+ : pTAp) for (i = 0; i < num_active_cells; i++) { ac[i]->Ax = 0.0; struct element *cell_elements = ac[i]->elements; size_t max_el = arrlen(cell_elements); for(int el = 0; el < max_el; el++) { element = cell_elements[el]; ac[i]->Ax += element.value * CG_P(element.cell); } pTAp += CG_P(ac[i]) * ac[i]->Ax; } //__________________________________________________________________ // Computes alpha. if(use_preconditioner) { alpha = rTz/pTAp; } else { alpha = rTr/pTAp; } //__________________________________________________________________ r1Tr1 = 0.0; r1Tz1 = 0.0; // Computes new value of solution: u = u + alpha*p. #pragma omp parallel for reduction (+:r1Tr1,r1Tz1) for (i = 0; i < num_active_cells; i++) { ac[i]->v += alpha * CG_P(ac[i]); CG_R(ac[i]) -= alpha * ac[i]->Ax; real_cpu r = CG_R(ac[i]); if(use_preconditioner) { real_cpu value = ac[i]->elements[0].value; if(value == 0.0) value = 1.0; CG_Z(ac[i]) = (1.0/value) * r; r1Tz1 += CG_Z(ac[i]) * r; } r1Tr1 += r * r; } //__________________________________________________________________ //Computes beta. if(use_preconditioner) { beta = r1Tz1/rTz; } else { beta = r1Tr1/rTr; } *error = r1Tr1; *number_of_iterations = *number_of_iterations + 1; if( *error <= precision ) { break; } //__________________________________________________________________ //Computes int_vector p1 = r1 + beta*p and uses it to upgrade p. #pragma omp parallel for for (i = 0; i < num_active_cells; i++) { if(use_preconditioner) { CG_P1(ac[i]) = CG_Z(ac[i]) + beta * CG_P(ac[i]); } else { CG_P1(ac[i]) = CG_R(ac[i]) + beta * CG_P(ac[i]); } CG_P(ac[i]) = CG_P1(ac[i]); } rTz = r1Tz1; rTr = r1Tr1; } }//end of conjugate gradient iterations. }//end conjugateGradient() function. 
SOLVE_LINEAR_SYSTEM(conjugate_gradient) {

    bool gpu = false;

    GET_PARAMETER_BOOLEAN_VALUE_OR_USE_DEFAULT(gpu, config->config_data, "use_gpu");

    if(gpu) {
#ifdef COMPILE_CUDA
        gpu_conjugate_gradient(config, the_grid, number_of_iterations, error);
#else
        print_to_stdout_and_file("Cuda runtime not found in this system. Falling back to the CPU solver!\n");
        cpu_conjugate_gradient(config, the_grid, number_of_iterations, error);
#endif
    }
    else {
        cpu_conjugate_gradient(config, the_grid, number_of_iterations, error);
    }
}

INIT_LINEAR_SYSTEM(init_conjugate_gradient) {

    bool gpu = false;

    GET_PARAMETER_BOOLEAN_VALUE_OR_USE_DEFAULT(gpu, config->config_data, "use_gpu");

    if(gpu) {
#ifdef COMPILE_CUDA
        init_gpu_conjugate_gradient(config, the_grid);
#else
        print_to_stdout_and_file("Cuda runtime not found in this system. Falling back to the CPU solver!\n");
        /* bug fix: the original fell back to cpu_conjugate_gradient() with
           number_of_iterations/error, which do not exist in an INIT function;
           the CPU init routine is the correct fallback */
        init_cpu_conjugate_gradient(config, the_grid);
#endif
    }
    else {
        init_cpu_conjugate_gradient(config, the_grid);
    }
}

END_LINEAR_SYSTEM(end_conjugate_gradient) {

    bool gpu = false;

    GET_PARAMETER_BOOLEAN_VALUE_OR_USE_DEFAULT(gpu, config->config_data, "use_gpu");

    if(gpu) {
#ifdef COMPILE_CUDA
        end_gpu_conjugate_gradient(config);
#else
        print_to_stdout_and_file("Cuda runtime not found in this system. Falling back to the CPU solver!\n");
        /* bug fix: as in init_conjugate_gradient, the original called
           cpu_conjugate_gradient() with out-of-scope arguments here */
        end_cpu_conjugate_gradient(config);
#endif
    }
    else {
        end_cpu_conjugate_gradient(config);
    }
}

// Berg's code
SOLVE_LINEAR_SYSTEM(jacobi) {

    if(!jacobi_initialized) {
        GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real_cpu, tol, config->config_data, "tolerance");
        max_its = 500;
        GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(int, max_its, config->config_data, "max_iterations");
        jacobi_initialized = true;
    }

    real_cpu sigma, precision = tol;

    uint32_t num_active_cells = the_grid->num_active_cells;
    struct cell_node** ac = the_grid->active_cells;

    *error = 1.0;
    *number_of_iterations = 1;

    struct element element;
    int i;

    if (*error >= precision) {
        //__________________________________________________________________________
        //Jacobi iterations.
        while (*number_of_iterations < max_its) {

            #pragma omp parallel for private (element,sigma)
            for (i = 0; i < num_active_cells; i++) {

                if(JACOBI_INFO(ac[i]) == NULL) {
                    INITIALIZE_JACOBI_INFO(ac[i]);
                }

                struct element *cell_elements = ac[i]->elements;
                sigma = 0.0;

                size_t max_el = arrlen(cell_elements);

                // Do not take the diagonal element
                for(int el = 1; el < max_el; el++) {
                    element = cell_elements[el];
                    sigma += element.value * element.cell->v;
                }

                real_cpu value = cell_elements[0].value;
                JACOBI_X_AUX(ac[i]) = (1.0/value)*(ac[i]->b - sigma);
            }

            real_cpu residue = 0.0;
            real_cpu sum;

            #pragma omp parallel for private (element,sum) reduction (+:residue)
            for (i = 0; i < num_active_cells; i++) {

                struct element *cell_elements = ac[i]->elements;
                size_t max_el = arrlen(cell_elements);

                // Full row product A*x_aux (diagonal included, unlike the loop above)
                sum = 0.0;
                for(int el = 0; el < max_el; el++) {
                    element = cell_elements[el];
                    sum += element.value * JACOBI_X_AUX(element.cell);
                }

                ac[i]->v = JACOBI_X_AUX(ac[i]);
                residue += pow(ac[i]->b - sum, 2);
            }

            // The error is the norm of the residue
            residue = sqrt(residue);
            *error = residue;

            *number_of_iterations = *number_of_iterations + 1;
            if( *error <= precision )
                break;
        }
    }
}

//// Berg's code
SOLVE_LINEAR_SYSTEM(biconjugate_gradient) {

    if(!bcg_initialized) {
        GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real_cpu, tol, config->config_data, "tolerance");

        char *preconditioner_char = NULL;
        GET_PARAMETER_VALUE_CHAR_OR_USE_DEFAULT(preconditioner_char, config->config_data, "use_preconditioner");
        if (preconditioner_char != NULL) {
            use_preconditioner = ((strcmp (preconditioner_char, "yes") == 0) ||
                                  (strcmp (preconditioner_char, "true") == 0));
        }

        max_its = 100;
        GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(int, max_its, config->config_data, "max_iterations");
        bcg_initialized = true;
    }

    real_cpu rTr, r1Tr1, pTAp, alpha, beta, precision = tol, rTz, r1Tz1;

    uint32_t num_active_cells = the_grid->num_active_cells;
    struct cell_node** ac = the_grid->active_cells;

    *error = 1.0;
    *number_of_iterations = 1;

    struct element element;
    int i;

    //__________________________________________________________________________
    // Zero all entries on the int_vector x*A
    // And initialize the second guess vector x_aux
    #pragma omp parallel for
    for (i = 0; i < num_active_cells; i++) {
        if(BCG_INFO(ac[i]) == NULL) {
            INITIALIZE_BICONJUGATE_GRADIENT_INFO(ac[i]);
        }
        BCG_XA(ac[i]) = 0.0;
        BCG_X_AUX(ac[i]) = ac[i]->v;
    }

    //__________________________________________________________________________
    //Computes int_vector A*x, x*A
    //xA must be fully calculated to start doing anything over the r_aux vector
    #pragma omp parallel for private (element)
    for (i = 0; i < num_active_cells; i++) {

        struct element *cell_elements = ac[i]->elements;
        ac[i]->Ax = 0.0;

        size_t max_el = arrlen(cell_elements);
        for(int el = 0; el < max_el; el++) {
            element = cell_elements[el];
            uint32_t col = element.column;
            ac[i]->Ax += element.value * element.cell->v;

            #pragma omp critical
            BCG_XA(ac[col]) += element.value * BCG_X_AUX(ac[i]);
        }
    }

    rTr = 0.0;
    rTz = 0.0;

    //__________________________________________________________________________
    //Computes residues r, r_aux
    //scalar rTr = r^T * r_aux and
    //sets initial search directions p and p_aux.
    #pragma omp parallel for private (element) reduction(+:rTr,rTz)
    for (i = 0; i < num_active_cells; i++) {

        struct element *cell_elements = ac[i]->elements;

        BCG_R(ac[i]) = ac[i]->b - ac[i]->Ax;
        BCG_R_AUX(ac[i]) = ac[i]->b - BCG_XA(ac[i]);

        if(use_preconditioner) {
            real_cpu value = cell_elements[0].value;
            if(value == 0.0) value = 1.0;
            BCG_Z(ac[i]) = (1.0/value) * BCG_R(ac[i]); // preconditioner
            BCG_Z_AUX(ac[i]) = (1.0/value) * BCG_R_AUX(ac[i]);
            rTz += BCG_R_AUX(ac[i]) * BCG_Z(ac[i]);
            BCG_P(ac[i]) = BCG_Z(ac[i]);
            BCG_P_AUX(ac[i]) = BCG_Z_AUX(ac[i]);
        }
        else {
            BCG_P(ac[i]) = BCG_R(ac[i]);
            BCG_P_AUX(ac[i]) = BCG_R_AUX(ac[i]);
        }

        rTr += BCG_R_AUX(ac[i]) * BCG_R(ac[i]);
    }

    *error = rTr;

    //__________________________________________________________________________
    //Biconjugate gradient iterations.
    if( *error >= precision ) {
        while( *number_of_iterations < max_its ) {

            //__________________________________________________________________
            // Computes Ap, pA and pTAp. Uses Ax to store Ap and xA to store pA
            pTAp = 0.0;

            #pragma omp parallel for
            for (i = 0; i < num_active_cells; i++)
                BCG_XA(ac[i]) = 0.0;

            #pragma omp parallel for private(element) reduction(+ : pTAp)
            for (i = 0; i < num_active_cells; i++) {

                ac[i]->Ax = 0.0;
                struct element *cell_elements = ac[i]->elements;

                size_t max_el = arrlen(cell_elements);
                for(int el = 0; el < max_el; el++) {
                    element = cell_elements[el];
                    uint32_t col = element.column;
                    ac[i]->Ax += element.value * BCG_P(element.cell);

                    #pragma omp critical
                    BCG_XA(ac[col]) += element.value * BCG_P_AUX(ac[i]);
                }

                pTAp += BCG_P_AUX(ac[i]) * ac[i]->Ax;
            }

            //__________________________________________________________________
            // Computes alpha.
            if(use_preconditioner) {
                alpha = rTz/pTAp;
            }
            else {
                alpha = rTr/pTAp;
            }
            //__________________________________________________________________

            r1Tr1 = 0.0;
            r1Tz1 = 0.0;

            // Computes new value of solution: u = u + alpha*p.
            //                                 u_aux = u_aux + alpha*p_aux
            #pragma omp parallel for reduction (+:r1Tr1,r1Tz1)
            for (i = 0; i < num_active_cells; i++) {

                ac[i]->v += alpha * BCG_P(ac[i]);
                BCG_X_AUX(ac[i]) += alpha * BCG_P_AUX(ac[i]);

                BCG_R(ac[i]) -= alpha * ac[i]->Ax;
                BCG_R_AUX(ac[i]) -= alpha * BCG_XA(ac[i]);

                if(use_preconditioner) {
                    real_cpu value = ac[i]->elements[0].value;
                    if(value == 0.0) value = 1.0;
                    BCG_Z(ac[i]) = (1.0/value) * BCG_R(ac[i]);
                    BCG_Z_AUX(ac[i]) = (1.0/value) * BCG_R_AUX(ac[i]);
                    r1Tz1 += BCG_Z(ac[i]) * BCG_R_AUX(ac[i]);
                }

                r1Tr1 += BCG_R(ac[i]) * BCG_R_AUX(ac[i]);
            }

            //__________________________________________________________________
            //Computes beta.
            if(use_preconditioner) {
                beta = r1Tz1/rTz;
            }
            else {
                beta = r1Tr1/rTr;
            }

            *error = r1Tr1;

            *number_of_iterations = *number_of_iterations + 1;
            if( *error <= precision ) {
                break;
            }

            //__________________________________________________________________
            //Computes int_vector p1 = r1 + beta*p and uses it to upgrade p.
            #pragma omp parallel for
            for (i = 0; i < num_active_cells; i++) {
                if(use_preconditioner) {
                    BCG_P1(ac[i]) = BCG_Z(ac[i]) + beta * BCG_P(ac[i]);
                    BCG_P1_AUX(ac[i]) = BCG_Z_AUX(ac[i]) + beta * BCG_P_AUX(ac[i]);
                }
                else {
                    BCG_P1(ac[i]) = BCG_R(ac[i]) + beta * BCG_P(ac[i]);
                    BCG_P1_AUX(ac[i]) = BCG_R_AUX(ac[i]) + beta * BCG_P_AUX(ac[i]);
                }
                BCG_P(ac[i]) = BCG_P1(ac[i]);
                BCG_P_AUX(ac[i]) = BCG_P1_AUX(ac[i]);
            }

            rTz = r1Tz1;
            rTr = r1Tr1;
        }
    }//end of biconjugate gradient iterations.

}//end biconjugateGradient() function.
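The cuBLAS/cuSPARSE call sequence in gpu_conjugate_gradient above is easier to follow against the scalar recurrence it implements; below is a minimal dense sketch of that same unpreconditioned loop (not part of the solver — plain C, with a dense matrix-vector product standing in for the CSR SpMV, and cg_dense a name introduced here for illustration):

#include <stdlib.h>

/* Plain CG for a symmetric positive definite n-by-n matrix A (row-major),
   mirroring the unpreconditioned branch above:
     beta = r1/r0;  p = r + beta*p;  alpha = r1/(p,Ap);
     x += alpha*p;  r -= alpha*Ap;   until (r,r) < tol or max_its hit. */
static int cg_dense(const double *A, const double *b, double *x,
                    int n, double tol, int max_its)
{
    double *r  = malloc(n * sizeof *r);
    double *p  = malloc(n * sizeof *p);
    double *Ap = malloc(n * sizeof *Ap);
    double r0, r1 = 0.0;

    for (int i = 0; i < n; i++) {            /* r = b - A*x, p = r */
        double ax = 0.0;
        for (int j = 0; j < n; j++) ax += A[i*n + j] * x[j];
        r[i] = b[i] - ax;
        p[i] = r[i];
        r1 += r[i] * r[i];
    }

    int k = 1;
    while (r1 >= tol && k <= max_its) {
        double pAp = 0.0;
        for (int i = 0; i < n; i++) {        /* Ap = A*p, pAp = (p,Ap) */
            Ap[i] = 0.0;
            for (int j = 0; j < n; j++) Ap[i] += A[i*n + j] * p[j];
            pAp += p[i] * Ap[i];
        }
        double alpha = r1 / pAp;
        r0 = r1; r1 = 0.0;
        for (int i = 0; i < n; i++) {        /* x += alpha*p, r -= alpha*Ap */
            x[i] += alpha * p[i];
            r[i] -= alpha * Ap[i];
            r1 += r[i] * r[i];
        }
        double beta = r1 / r0;
        for (int i = 0; i < n; i++) p[i] = r[i] + beta * p[i];
        k++;
    }

    free(r); free(p); free(Ap);
    return k - 1; /* iterations performed, as *number_of_iterations above */
}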
validate_yolo8.src.h
#pragma once
#include "ukr.h"
#include "omp.h"
#include "transpose.h"
#include "gen_ukr_A6B2gemm_1_256_68_68_128_3_3.h"
#include "gen_ukr_A4B2gemm_1_256_68_68_128_3_3.h"

void testrun(float* A, float* B, float* C, float* oriB)
{
    int tid = omp_get_thread_num();
    int Nx = 68;
    int Ny = 68;
    int Nh = 3;
    long long Astrides[6] = {0, 1, 2, 3, 4, 5};
    int b1 = 0;

    for (int fpck = (tid%1)*16; fpck < uNf; fpck += 1*16) {
        for (int cwh = (tid/1)*8; cwh < uNc*uNw*uNh/8*8; cwh += 8*1) {
            transpose8x8_avx(oriB + (fpck+0)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh*16 + 0, uNc*uNw*uNh, 16);
            transpose8x8_avx(oriB + (fpck+8)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh*16 + 8, uNc*uNw*uNh, 16);
        }
    }

    /* Tile sizes. The original left these commented out (with the candidate
       values listed), which cannot compile since the loop nest below uses
       them; the first listed candidates are taken here. */
    int Tc1 = 8;     // candidates: 1, 8, 16, 32, 48
    int Txy3 = 12*8; // candidates: 18, 36, 72, 144
    int Tf2 = 144;   // candidates: 32, 64, 128, 256

    #pragma omp barrier
    // begin push button generated block
    for(int xy5=0; xy5<4624+0; xy5+=4624) {
     for(int f5=0; f5<256+0; f5+=256) {
      for(int c5=0; c5<128+0; c5+=128) { // full space
       for(int c4=c5; c4<min(128, 128+c5); c4+=128) {
        for(int f4=f5; f4<min(256, 256+f5); f4+=Tf2) {
         for(int xy4=xy5; xy4<min(4624, 4624+xy5); xy4+=4624) {
          for(int c3=c4; c3<min(128, 128+c4); c3+=Tc1) // FUll
          {
           for(int xy3=xy4; xy3<min(4624, 4624+xy4); xy3+=Txy3) //Tc1, Nxy, Nf
           {
            for(int f3=f4; f3<min(256, Tf2+f4); f3+=Tf2) // Tc1, Txy3, Nf
            {
             for(int xy2=xy3; xy2<min(4624, Txy3+xy3); xy2+=6) //Tc1, Txy3, Tf2
             {
              for(int f2=f3; f2<min(256, Tf2+f3); f2+=16) // Tc1, 6, Tf2
              {
               for(int c2=c3; c2<min(128, Tc1+c3); c2+=Tc1) // Tc1, 6, 16
               {
                for(int c1=c2; c1<min(128, Tc1+c2); c1+=Tc1) {
                 for(int xy1=xy2; xy1<min(4624, 6+xy2); xy1+=6) {
                  for(int f1=f2; f1<min(256, 16+f2); f1+=16) {
                   int ctile = min(Tc1, 128-c1);
                   int x1 = xy1/68;
                   int y1 = xy1%68/1;
                   int c1_1 = c1/1;
                   int c1_2 = c1%1/1;
                   int kf1_1 = f1/16;
                   int kf1_2 = f1%16/1;
                   int of1_1 = f1/1;
                   int of1_2 = f1%1/1;
                   int offsetA = 0 + b1*627200 + c1_1*4900 + 1*x1*70 + 1*y1*1 + c1_2*1;
                   int offsetB = 0 + kf1_1*18432 + c1*144 + 0*48 + 0*16 + kf1_2*1;
                   int offsetC = 0 + b1*1183744 + of1_1*4624 + x1*68 + y1*1 + of1_2*1;
                   if(68-y1 >= 6){
                    cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
                   }
                   else if(68*68-xy1 >= 6){
                    for(int sti=68-y1; sti<6; sti+=1) {
                     Astrides[sti] += 2;
                    }
                    cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
                    for(int sti=68-y1; sti<6; sti+=1) {
                     Astrides[sti] -= 2;
                    }
                   }
                   else{
                    cnn_ukr_float_scatter_4x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
                   }
                  }
                 }
                }
               }
              }
             }
            }
           }
          }
         }
        }
       }
      }
     }
    }
    // end push button generated block
}
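The microkernels invoked above are generated elsewhere; as a reading aid, here is a hypothetical scalar reference of what a 6x2v call (6 output pixels x 16 filters, over ctile channels) appears to accumulate, inferred purely from the offset arithmetic in the loop nest (4900 = 70x70 padded input plane, 144 = 9 taps x 16 filters of packed B per channel, 4624 = 68x68 output plane; the +2 adjustment to Astrides at row boundaries skips the two padding columns). The real kernels use AVX registers, and this function name is not part of the generated headers:

/* Hypothetical scalar model of cnn_ukr_float_scatter_6x2v_cxycgemm;
   the layout constants are inferred, not taken from the generator. */
static void ukr_6x16_scalar_ref(const float *A, const float *B, float *C,
                                int ctile, const long long *Astrides)
{
    for (int c = 0; c < ctile; ++c)               /* reduction over input channels */
        for (int kh = 0; kh < 3; ++kh)            /* 3x3 kernel taps */
            for (int kw = 0; kw < 3; ++kw)
                for (int xy = 0; xy < 6; ++xy) {  /* 6 output pixels, via Astrides */
                    float a = A[c*4900 + kh*70 + kw + Astrides[xy]];
                    for (int f = 0; f < 16; ++f)  /* 16 packed filters (2 x 8 lanes) */
                        C[f*4624 + xy] += a * B[c*144 + kh*48 + kw*16 + f];
                }
}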
denseAsyncBlocksJacobi.h
//
// Created by mbarb on 14/02/2018.
//

#ifndef PARALLELITERATIVE_DENSEASYNCBLOCKJACOBI_H
#define PARALLELITERATIVE_DENSEASYNCBLOCKJACOBI_H

#include <Eigen>
#include <iostream>
#include <omp.h>
#include "utils.h"
#include "denseParallelJacobi.h"

namespace Iterative {

    template <typename Scalar, long long SIZE>
    class denseAsyncBlocksJacobi : public denseParallelJacobi<Scalar, SIZE> {

    public:
        /**
         *
         * @param matrix linear system matrix
         * @param vector known term vector
         * @param iterations max number of iterations
         * @param tolerance min error tolerated
         * @param workers number of threads
         * @param blockSize size of the block
         */
        explicit denseAsyncBlocksJacobi(
                const Eigen::Matrix<Scalar, SIZE, SIZE>& matrix,
                const Eigen::ColumnVector<Scalar, SIZE>& vector,
                const ulonglong iterations,
                const Scalar tolerance,
                const ulong workers = 0L,
                const ulonglong blockSize = 0L) :
                denseParallelJacobi<Scalar, SIZE>::denseParallelJacobi(matrix, vector, iterations, tolerance, workers) {

            this->blockSize = blockSize;
            if (blockSize == 0)
                this->blockSize = std::max(ulong(this->A.cols() / workers), (ulong) 1L);
            splitter();
        }

        const Eigen::ColumnVector<Scalar, SIZE> solve() {

            Eigen::ColumnVector<Scalar, SIZE> oldSolution(this->solution);
            std::vector<Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>> inverses(blocks.size());

            // compute the inverses of the diagonal blocks and store them
            #pragma omp parallel for
            for (int i = 0; i < blocks.size(); ++i) {
                inverses[i] = this->A.block(blocks[i].startCol, blocks[i].startRow,
                                            blocks[i].cols, blocks[i].rows).inverse();
            }

            std::vector<int> index;
            auto stop = false;

            for (this->iteration = 0L; this->iteration < this->iterations && !stop; ++this->iteration) {

                #pragma omp parallel
                #pragma omp for private(oldSolution) schedule(dynamic) nowait
                for (int i = 0; i < inverses.size(); ++i) {
                    oldSolution = this->solution;
                    // zero the components of the old solution that correspond to this
                    // block, so A * oldSolution excludes the block's own contribution
                    Eigen::ColumnVector<Scalar, Eigen::Dynamic> oldBlock =
                            oldSolution.segment(blocks[i].startCol, blocks[i].cols);
                    auto zeroBlock = oldSolution.segment(blocks[i].startCol, blocks[i].cols);
                    zeroBlock.setZero();
                    // the segment of the solution that this block approximates
                    auto block = this->solution.segment(blocks[i].startCol, blocks[i].cols);
                    // approximate the solution using the inverse and the solution at the previous iteration
                    block = inverses[i] * (this->b - (this->A * oldSolution)).segment(blocks[i].startCol, blocks[i].cols);
                    zeroBlock = block;
                    if ((oldBlock - block).template lpNorm<1>() / block.size() <= this->tolerance) {
                        #pragma omp critical
                        index.emplace_back(i);
                    }
                }
                if (!index.empty()) {
                    #pragma omp barrier
                    #pragma omp single
                    {
                        // drop the blocks that already converged
                        std::sort(index.rbegin(), index.rend());
                        for (auto i : index) {
                            blocks.erase(blocks.begin() + i);
                            inverses.erase(inverses.begin() + i);
                        }
                        index.clear();
                        stop = inverses.empty();
                    }
                }
            }
            #pragma omp barrier
            std::cout << this->iteration << std::endl;
            return this->solution;
        }

    protected:

        ulonglong blockSize;
        std::vector<Index> blocks;

        void splitter() {
            for (ulonglong i = 0; i < this->A.cols(); i += blockSize) {
                blocks.emplace_back(Index(i, std::min(blockSize, (ulonglong) this->A.cols() - i),
                                          i, std::min(blockSize, (ulonglong) this->A.rows() - i)));
            }
        }

    private:
    };
}

#endif //PARALLELITERATIVE_DENSEASYNCBLOCKJACOBI_H
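/*
 * A minimal synchronous point-Jacobi sweep in plain C, sketching the update
 * and per-iteration convergence test that the block variant above
 * generalizes: blocks of size 1 whose "inverse" is just 1/A[i][i]. The 4x4
 * diagonally dominant system, tolerance, and iteration cap are illustrative
 * assumptions, not values from the class.
 */
#include <math.h>
#include <stdio.h>

#define N 4

int main(void) {
    double A[N][N] = {{10,1,0,0},{1,10,1,0},{0,1,10,1},{0,0,1,10}};
    double b[N] = {11, 12, 12, 11};   /* exact solution is x = (1,1,1,1) */
    double x[N] = {0}, x_new[N];

    for (int it = 0; it < 100; it++) {
        double err = 0.0;
        /* each component update is independent, so the sweep parallelizes */
        #pragma omp parallel for reduction(+ : err)
        for (int i = 0; i < N; i++) {
            double s = b[i];
            for (int j = 0; j < N; j++)
                if (j != i) s -= A[i][j] * x[j];
            x_new[i] = s / A[i][i];   /* the 1x1 "block inverse" */
            err += fabs(x_new[i] - x[i]);
        }
        for (int i = 0; i < N; i++) x[i] = x_new[i];
        if (err / N <= 1e-12) { printf("converged at iteration %d\n", it); break; }
    }
    for (int i = 0; i < N; i++) printf("x[%d] = %f\n", i, x[i]);
    return 0;
}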
utils.c
#include "fft.h"
#include <string.h>
#include <assert.h>
#include <stdlib.h>
#include "utils.h"
#include <stdio.h>
#include "wrapper.h"

// Three-way comparator for arrays of double (real_t is assumed double here).
int compare (const void * a, const void * b)
{
  //return ( *(double*)a - *(double*)b );
  double x = *(double*)a, y = *(double*)b;
  return (x > y) - (x < y);
}

int comp_struct(const void *a,const void *b)
{
  Node *aa=(Node *)a;
  Node *bb=(Node *)b;
  return(((cabs(aa->value))<(cabs(bb->value)))?1:-1);
}

int comp_struct2(const void *a,const void *b)
{
  Node *aa=(Node *)a;
  Node *bb=(Node *)b;
  if((aa->key)==(bb->key))
    return 0;
  return((aa->key)>((bb->key))?1:-1);
}

int comp_struct3(const void *a,const void *b)
{
  Pair *aa=(Pair *)a;
  Pair *bb=(Pair *)b;
  if((aa->first)==(bb->first))
    return 0;
  return((aa->first)>((bb->first))?1:-1);
}

int comp_struct4(const void *a,const void *b)
{
  Pair *aa=(Pair *)a;
  Pair *bb=(Pair *)b;
  //if((aa->first)==(bb->first))
  //  return 0;
  return((aa->first) - ((bb->first)));
}

// Three-way comparator for int arrays. The double-based 'compare' above must
// not be used on ints: it would reinterpret (and overread) their bytes.
static int compare_int(const void *a, const void *b)
{
  int x = *(const int*)a, y = *(const int*)b;
  return (x > y) - (x < y);
}

void shift(complex_t *x, int n, int r){
  r = (n + r)%n;
  assert(n >= r);
  complex_t *tmp = (complex_t *)malloc(r * sizeof(*tmp));
  memcpy(tmp, x+n-r, r*sizeof(*tmp));
  memmove(x+r, x, (n-r)*sizeof(*x));
  memcpy(x, tmp, r*sizeof(*tmp));
  free(tmp);
}

// Compute the gcd of a and b
// assumes a, b > 0.
int gcd(int a, int b){
  if (a%b == 0) return b;
  //if (a & (b-1) == 0) return b;
  return gcd(b, a%b);
  //return gcd(b, a&(b-1));
}

double phase(complex_t x){
  return atan2(cimag(x), creal(x));
}

/*
inline double cabs2(complex_t x){
  return (creal(x) * creal(x) + cimag(x) * cimag(x));
}
*/

// crappy inversion code I stole from elsewhere
// Undefined if gcd(a, n) > 1
int mod_inverse(int a, int n) {
  int i = n, v = 0, d = 1;
  while (a>0) {
    int t = i/a, x = a;
    a = i % x;
    i = x;
    x = d;
    d = v - t*x;
    v = x;
  }
  v %= n;
  if (v<0) v = (v+n)%n;
  return v;
}

/*
  Compute the num'th smallest element of the length n input.
  uses std::nth_element, but doesn't mutate input.
*/
real_t nth_element_immutable(real_t *input, int n, int num){
  real_t *x = (real_t *)malloc(n*sizeof(*x));
  memcpy(x, input, n*sizeof(*x));
#ifdef WRAPPER
  nthelement(x, num, n);
#else
  qsort(x,n,sizeof(*x),compare);  // NOTE: 'compare' assumes real_t is double
#endif
  real_t ans = x[num];
  free(x);
  return ans;
}

/*
  Output the indices corresponding to the num largest elements of samples.
  Output is sorted.

  int *output     -- output array
  int num         -- size of output, equals the cutoff num
  real_t *samples -- input array
  int n           -- size of input array, in this case, is B

  Cut B into num
*/
void find_largest_indices(int *output, int num, real_t *samples, int n){
  assert(n >= num + 1);

  //use num+1 so we can use > cutoff and probably get exactly num.
  //if we get fewer, the second pass uses == cutoff.
  real_t cutoff = nth_element_immutable(samples, n, n-num-1);

  int count = 0;
  for(int i = 0; i < n; i++){
    if (samples[i] > cutoff){
      output[count++] = i;
    }
  }
  //printf("count =%d, num = %d\n", count, num);

  //#pragma omp parallel for schedule(static)
  //for(int i = 0; i < n; i++){
  //  if (samples[i] >= cutoff){
  //    output[i] = i;
  //  }
  //}
  //printf("count =%d, num = %d\n", count, num);

  if (count < num){
    for(int i = 0; i < n; i++){
      if (samples[i] == cutoff) {
        output[count++] = i;
        if (count >= num) break;
      }
    }
    // restore sorted order with an int comparator; the double-based
    // 'compare' would misorder an int array
    qsort(output, count, sizeof(int), compare_int);
  }
  assert(count == num);
}

void radix(int byte, int size, int *A, int *TEMP) {
  int* COUNT = (int*)calloc(256,sizeof(*COUNT));
  byte = byte << 3;
  for (int i = 0; i < size; ++i)
    ++COUNT[((A[i]) >> (byte)) & 0xFF];
  for (int i = 1; i < 256; ++i)
    COUNT[i] += COUNT[i - 1];
  for (int i = size - 1; i >= 0; --i) {
    TEMP[COUNT[(A[i] >> (byte)) & 0xFF] - 1] = A[i];
    --COUNT[(A[i] >> (byte)) & 0xFF];
  }
  free(COUNT);
}

void radix_sort(int *A, int size) {
  int* TEMP = (int*)malloc(size*sizeof(*TEMP));
  for (unsigned int i = 0; i < sizeof(int); i += 2) {
    // even byte
    radix(i, size, A, TEMP);
    // odd byte
    radix(i + 1, size, TEMP, A);
  }
  free(TEMP);
}

void radix_filt(int byte, int size, int *A, int *TEMP, complex_t* Filter, complex_t* TMP_F) {
  int* COUNT = (int*)calloc(256,sizeof(*COUNT));
  byte = byte << 3;
  for (int i = 0; i < size; ++i)
    ++COUNT[((A[i]) >> (byte)) & 0xFF];
  for (int i = 1; i < 256; ++i)
    COUNT[i] += COUNT[i - 1];
  for (int i = size - 1; i >= 0; --i) {
    TEMP[COUNT[(A[i] >> (byte)) & 0xFF] - 1] = A[i];
    TMP_F[COUNT[(A[i] >> (byte)) & 0xFF] - 1] = Filter[i];
    --COUNT[(A[i] >> (byte)) & 0xFF];
  }
  free(COUNT);
}

void radix_sort_filt(int *A, complex_t* Filter,int size) {
  int *TEMP = (int*)malloc(size*sizeof(*TEMP));
  complex_t *TMP_F = (complex_t*)malloc(size*sizeof(*TMP_F));
  for (unsigned int i = 0; i < sizeof(int); i += 2) {
    // even byte
    radix_filt(i, size, A, TEMP, Filter, TMP_F);
    // odd byte
    radix_filt(i + 1, size, TEMP, A, TMP_F, Filter);
  }
  free(TEMP);
  free(TMP_F);
}

int floor_to_pow2(double x){
  unsigned int ans;
  for(ans = 1; ans <= x; ans <<= 1)
    ;
  return ans / 2;
}

double AWGN(complex_t *x, int n, double std_noise){
  if(std_noise==0) return 1000000000;
  complex_t gn = 0;
  double sig_power = 0;
  double noise_power = 0;
  double snr;
  double u, v;
  for(int h = 0; h < n; h++){
    sig_power += cabs(x[h])*cabs(x[h]);
    u = drand48();
    v = drand48();
    gn = std_noise * sqrt(-2*log(u)) * cexp(2*M_PI * I * v);
    noise_power += -2*log(u);
    x[h] += gn;
  }
  noise_power = noise_power * std_noise * std_noise;
  snr = sig_power/noise_power;
  return snr;
}

double binomial_cdf(double prob, int n, int needed){
  double ans = 0;
  double choose = 1;
  for(int i = n; i >= needed; i--){
    ans += choose * pow(prob, i) * pow(1-prob, n-i);
    choose = choose * i / (n-i+1);
  }
  return ans;
}
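/*
 * A standalone sketch of the cutoff-selection scheme in find_largest_indices
 * above: pick the k-th largest value as a threshold, take everything strictly
 * above it, then fill ties at the threshold until exactly num indices are
 * chosen and re-sort. Plain doubles and qsort stand in for real_t and the
 * nth-element wrapper; the sample data is invented so the tie-filling second
 * pass actually runs.
 */
#include <stdio.h>
#include <stdlib.h>

static int cmp_double(const void *a, const void *b) {
    double x = *(const double *)a, y = *(const double *)b;
    return (x > y) - (x < y);
}

static int cmp_int(const void *a, const void *b) {
    int x = *(const int *)a, y = *(const int *)b;
    return (x > y) - (x < y);
}

int main(void) {
    double samples[] = {0.3, 2.0, 0.1, 1.0, 5.0, 0.2, 1.0, 2.0};
    const int n = 8, num = 4;

    /* threshold = (n-num-1)-th element of a sorted copy, as in the original */
    double sorted[8];
    for (int i = 0; i < n; i++) sorted[i] = samples[i];
    qsort(sorted, n, sizeof(*sorted), cmp_double);
    double cutoff = sorted[n - num - 1];

    int output[8], count = 0;
    for (int i = 0; i < n; i++)                 /* first pass: strictly above */
        if (samples[i] > cutoff) output[count++] = i;
    if (count < num) {                          /* second pass: ties at cutoff */
        for (int i = 0; i < n && count < num; i++)
            if (samples[i] == cutoff) output[count++] = i;
        qsort(output, count, sizeof(int), cmp_int);
    }

    for (int i = 0; i < count; i++)
        printf("index %d -> %g\n", output[i], samples[output[i]]);
    return 0;
}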
HybridCplxAdoptor.h
////////////////////////////////////////////////////////////////////////////////////// // This file is distributed under the University of Illinois/NCSA Open Source License. // See LICENSE file in top directory for details. // // Copyright (c) 2016 Jeongnim Kim and QMCPACK developers. // // File developed by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory // // File created by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory // ////////////////////////////////////////////////////////////////////////////////////// /** @file HybridCplxAdoptor.h * * Adoptor classes to handle complex hybrid orbitals with arbitrary precision */ #ifndef QMCPLUSPLUS_HYBRID_CPLX_SOA_ADOPTOR_H #define QMCPLUSPLUS_HYBRID_CPLX_SOA_ADOPTOR_H #include <QMCWaveFunctions/BsplineFactory/HybridAdoptorBase.h> namespace qmcplusplus { /** adoptor class to match * */ template<typename BaseAdoptor> struct HybridCplxSoA : public BaseAdoptor, public HybridAdoptorBase<typename BaseAdoptor::DataType> { using HybridBase = HybridAdoptorBase<typename BaseAdoptor::DataType>; using ST = typename BaseAdoptor::DataType; using PointType = typename BaseAdoptor::PointType; using SingleSplineType = typename BaseAdoptor::SingleSplineType; using RealType = typename SPOSet::RealType; using ValueType = typename SPOSet::ValueType; typename OrbitalSetTraits<ValueType>::ValueVector_t psi_AO, d2psi_AO; typename OrbitalSetTraits<ValueType>::GradVector_t dpsi_AO; Matrix<ST, aligned_allocator<ST>> multi_myV; using BaseAdoptor::myG; using BaseAdoptor::myH; using BaseAdoptor::myL; using BaseAdoptor::myV; using HybridBase::d2f_dr2; using HybridBase::df_dr; using HybridBase::dist_dr; using HybridBase::dist_r; HybridCplxSoA() : BaseAdoptor() { this->AdoptorName = "Hybrid" + this->AdoptorName; this->KeyWord = "Hybrid" + this->KeyWord; } inline void resizeStorage(size_t n, size_t nvals) { BaseAdoptor::resizeStorage(n, nvals); HybridBase::resizeStorage(myV.size()); } void bcast_tables(Communicate* comm) { BaseAdoptor::bcast_tables(comm); HybridBase::bcast_tables(comm); } void gather_tables(Communicate* comm) { BaseAdoptor::gather_tables(comm); HybridBase::gather_atomic_tables(comm, BaseAdoptor::offset); } bool read_splines(hdf_archive& h5f) { return HybridBase::read_splines(h5f) && BaseAdoptor::read_splines(h5f); } bool write_splines(hdf_archive& h5f) { return HybridBase::write_splines(h5f) && BaseAdoptor::write_splines(h5f); } inline void flush_zero() { //BaseAdoptor::flush_zero(); HybridBase::flush_zero(); } template<typename VV> inline void evaluate_v(const ParticleSet& P, const int iat, VV& psi) { const RealType smooth_factor = HybridBase::evaluate_v(P, iat, myV); const RealType cone(1); if (smooth_factor < 0) { BaseAdoptor::evaluate_v(P, iat, psi); } else if (smooth_factor == cone) { const PointType& r = P.activeR(iat); BaseAdoptor::assign_v(r, myV, psi, 0, myV.size() / 2); } else { const PointType& r = P.activeR(iat); psi_AO.resize(psi.size()); BaseAdoptor::assign_v(r, myV, psi_AO, 0, myV.size() / 2); BaseAdoptor::evaluate_v(P, iat, psi); HybridBase::interpolate_buffer_v(psi, psi_AO); } } template<typename VV, typename RT> inline void evaluateDetRatios(const VirtualParticleSet& VP, VV& psi, const VV& psiinv, std::vector<RT>& ratios) { if (VP.isOnSphere()) { // resize scratch space psi_AO.resize(psi.size()); if (multi_myV.rows() < VP.getTotalNum()) multi_myV.resize(VP.getTotalNum(), myV.size()); const RealType smooth_factor = HybridBase::evaluateValuesC2X(VP, multi_myV); const RealType cone(1); for (int iat = 0; iat < VP.getTotalNum(); ++iat) { if 
(smooth_factor < 0) BaseAdoptor::evaluate_v(VP, iat, psi); else if (smooth_factor == cone) { const PointType& r = VP.R[iat]; Vector<ST, aligned_allocator<ST>> myV_one(multi_myV[iat], myV.size()); BaseAdoptor::assign_v(r, myV_one, psi, 0, myV.size() / 2); } else { const PointType& r = VP.R[iat]; Vector<ST, aligned_allocator<ST>> myV_one(multi_myV[iat], myV.size()); BaseAdoptor::assign_v(r, myV_one, psi_AO, 0, myV.size() / 2); BaseAdoptor::evaluate_v(VP, iat, psi); HybridBase::interpolate_buffer_v(psi, psi_AO); } ratios[iat] = simd::dot(psi.data(), psiinv.data(), psi.size()); } } else { for (int iat = 0; iat < VP.getTotalNum(); ++iat) { evaluate_v(VP, iat, psi); ratios[iat] = simd::dot(psi.data(), psiinv.data(), psi.size()); } } } template<typename VV, typename GV> inline void evaluate_vgl(const ParticleSet& P, const int iat, VV& psi, GV& dpsi, VV& d2psi) { const RealType smooth_factor = HybridBase::evaluate_vgl(P, iat, myV, myG, myL); const RealType cone(1); if (smooth_factor < 0) { BaseAdoptor::evaluate_vgl(P, iat, psi, dpsi, d2psi); } else if (smooth_factor == cone) { const PointType& r = P.activeR(iat); BaseAdoptor::assign_vgl_from_l(r, psi, dpsi, d2psi); } else { const PointType& r = P.activeR(iat); psi_AO.resize(psi.size()); dpsi_AO.resize(psi.size()); d2psi_AO.resize(psi.size()); BaseAdoptor::assign_vgl_from_l(r, psi_AO, dpsi_AO, d2psi_AO); BaseAdoptor::evaluate_vgl(P, iat, psi, dpsi, d2psi); HybridBase::interpolate_buffer_vgl(psi, dpsi, d2psi, psi_AO, dpsi_AO, d2psi_AO); } } template<typename VV, typename GV> inline void mw_evaluate_vgl(const std::vector<HybridCplxSoA*>& sa_list, const std::vector<ParticleSet*>& P_list, int iat, const std::vector<VV*>& psi_v_list, const std::vector<GV*>& dpsi_v_list, const std::vector<VV*>& d2psi_v_list) { #pragma omp parallel for for (int iw = 0; iw < sa_list.size(); iw++) sa_list[iw]->evaluate_vgl(*P_list[iw], iat, *psi_v_list[iw], *dpsi_v_list[iw], *d2psi_v_list[iw]); } template<typename VV, typename GV, typename GGV> inline void evaluate_vgh(const ParticleSet& P, const int iat, VV& psi, GV& dpsi, GGV& grad_grad_psi) { APP_ABORT("HybridCplxSoA::evaluate_vgh not implemented!"); if (HybridBase::evaluate_vgh(P, iat, myV, myG, myH)) { const PointType& r = P.activeR(iat); BaseAdoptor::assign_vgh(r, psi, dpsi, grad_grad_psi, 0, myV.size() / 2); } else BaseAdoptor::evaluate_vgh(P, iat, psi, dpsi, grad_grad_psi); } template<typename VV, typename GV, typename GGV, typename GGGV> inline void evaluate_vghgh(const ParticleSet& P, const int iat, VV& psi, GV& dpsi, GGV& grad_grad_psi, GGGV& grad_grad_grad_psi) { APP_ABORT("HybridCplxSoA::evaluate_vghgh not implemented!"); } }; } // namespace qmcplusplus #endif
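/*
 * A minimal sketch of the idea behind interpolate_buffer_v above: inside the
 * buffer region around an atom, an orbital value is a smooth blend of the
 * atomic-region evaluation and the B-spline evaluation. The linear blend,
 * the function name blend_v, and the array contents are illustrative
 * assumptions; the actual QMCPACK smoothing function is not reproduced here.
 */
#include <stdio.h>

static void blend_v(double *psi_bspline, const double *psi_ao, double f, int n) {
    /* f == 1 -> pure atomic region; f == 0 -> pure spline region */
    for (int i = 0; i < n; i++)
        psi_bspline[i] = f * psi_ao[i] + (1.0 - f) * psi_bspline[i];
}

int main(void) {
    double psi_bspline[3] = {0.10, 0.20, 0.30};
    double psi_ao[3]      = {0.12, 0.18, 0.33};
    blend_v(psi_bspline, psi_ao, 0.25, 3);
    for (int i = 0; i < 3; i++) printf("psi[%d] = %f\n", i, psi_bspline[i]);
    return 0;
}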
GB_AxB_saxpy3_symbolic.c
//------------------------------------------------------------------------------ // GB_AxB_saxpy3_symbolic: symbolic analysis for GB_AxB_saxpy3 //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Symbolic analysis for C=A*B, C<M>=A*B or C<!M>=A*B, via GB_AxB_saxpy3. // Coarse tasks compute nnz (C (:,j)) for each of their vectors j. Fine tasks // just scatter the mask M into the hash table. This phase does not depend on // the semiring, nor does it depend on the type of C, A, or B. It does access // the values of M, if the mask matrix M is present and not structural. // If B is hypersparse, C must also be hypersparse. // Otherwise, C must be sparse. #include "GB_AxB_saxpy3.h" #include "GB_AxB_saxpy3_template.h" #include "GB_atomics.h" #include "GB_bracket.h" #include "GB_unused.h" void GB_AxB_saxpy3_symbolic ( GrB_Matrix C, // Cp is computed for coarse tasks const GrB_Matrix M, // mask matrix M const bool Mask_comp, // M complemented, or not const bool Mask_struct, // M structural, or not const bool M_dense_in_place, const GrB_Matrix A, // A matrix; only the pattern is accessed const GrB_Matrix B, // B matrix; only the pattern is accessed GB_saxpy3task_struct *TaskList, // list of tasks, and workspace int ntasks, // total number of tasks int nfine, // number of fine tasks int nthreads // number of threads ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (!GB_ZOMBIES (M)) ; ASSERT (GB_JUMBLED_OK (M)) ; ASSERT (!GB_PENDING (M)) ; ASSERT (!GB_ZOMBIES (A)) ; ASSERT (GB_JUMBLED_OK (A)) ; ASSERT (!GB_PENDING (A)) ; ASSERT (!GB_ZOMBIES (B)) ; ASSERT (GB_JUMBLED_OK (B)) ; ASSERT (!GB_PENDING (B)) ; //-------------------------------------------------------------------------- // get M, A, B, and C //-------------------------------------------------------------------------- int64_t *GB_RESTRICT Cp = C->p ; const int64_t cvlen = C->vlen ; const int64_t *GB_RESTRICT Bp = B->p ; const int64_t *GB_RESTRICT Bh = B->h ; const int8_t *GB_RESTRICT Bb = B->b ; const int64_t *GB_RESTRICT Bi = B->i ; const int64_t bvlen = B->vlen ; const bool B_jumbled = B->jumbled ; const bool B_is_bitmap = GB_IS_BITMAP (B) ; const bool B_is_sparse = GB_IS_SPARSE (B) ; const bool B_is_hyper = GB_IS_HYPERSPARSE (B) ; const bool B_is_sparse_or_hyper = B_is_sparse || B_is_hyper ; const int64_t *GB_RESTRICT Ap = A->p ; const int64_t *GB_RESTRICT Ah = A->h ; const int8_t *GB_RESTRICT Ab = A->b ; const int64_t *GB_RESTRICT Ai = A->i ; const int64_t anvec = A->nvec ; const int64_t avlen = A->vlen ; const bool A_is_bitmap = GB_IS_BITMAP (A) ; const bool A_is_sparse = GB_IS_SPARSE (A) ; const bool A_is_hyper = GB_IS_HYPERSPARSE (A) ; const bool A_jumbled = A->jumbled ; const int64_t *GB_RESTRICT Mp = NULL ; const int64_t *GB_RESTRICT Mh = NULL ; const int8_t *GB_RESTRICT Mb = NULL ; const int64_t *GB_RESTRICT Mi = NULL ; const GB_void *GB_RESTRICT Mx = NULL ; size_t msize = 0 ; int64_t mnvec = 0 ; int64_t mvlen = 0 ; const bool M_is_hyper = GB_IS_HYPERSPARSE (M) ; const bool M_is_bitmap = GB_IS_BITMAP (M) ; const bool M_jumbled = GB_JUMBLED (M) ; if (M != NULL) { Mp = M->p ; Mh = M->h ; Mb = M->b ; Mi = M->i ; Mx = (GB_void *) (Mask_struct ? 
NULL : (M->x)) ; msize = M->type->size ; mnvec = M->nvec ; mvlen = M->vlen ; } // 3 cases: // M not present and Mask_comp false: compute C=A*B // M present and Mask_comp false: compute C<M>=A*B // M present and Mask_comp true : compute C<!M>=A*B // If M is NULL on input, then Mask_comp is also false on input. const bool mask_is_M = (M != NULL && !Mask_comp) ; // ignore the mask if present, not complemented, dense and // used in place, structural, and not bitmap. In this case, // all entries in M are true, so M can be ignored. const bool ignore_mask = mask_is_M && M_dense_in_place && Mask_struct && !M_is_bitmap ; //========================================================================== // phase1: count nnz(C(:,j)) for coarse tasks, scatter M for fine tasks //========================================================================== // At this point, all of Hf [...] is zero, for all tasks. // Hi and Hx are not initialized. int taskid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (taskid = 0 ; taskid < ntasks ; taskid++) { //---------------------------------------------------------------------- // get the task descriptor //---------------------------------------------------------------------- int64_t hash_size = TaskList [taskid].hsize ; bool use_Gustavson = (hash_size == cvlen) ; if (taskid < nfine) { //------------------------------------------------------------------ // no work for fine tasks in phase1 if M is not present //------------------------------------------------------------------ if (M == NULL) continue ; //------------------------------------------------------------------ // get the task descriptor //------------------------------------------------------------------ int64_t kk = TaskList [taskid].vector ; int64_t bjnz = (Bp == NULL) ? bvlen : (Bp [kk+1] - Bp [kk]) ; // no work to do if B(:,j) is empty if (bjnz == 0) continue ; // partition M(:,j) GB_GET_M_j ; // get M(:,j) int team_size = TaskList [taskid].team_size ; int leader = TaskList [taskid].leader ; int my_teamid = taskid - leader ; int64_t mystart, myend ; GB_PARTITION (mystart, myend, mjnz, my_teamid, team_size) ; mystart += pM_start ; myend += pM_start ; if (use_Gustavson) { //-------------------------------------------------------------- // phase1: fine Gustavson task, C<M>=A*B or C<!M>=A*B //-------------------------------------------------------------- // Scatter the values of M(:,j) into Hf. No atomics needed // since all indices i in M(;,j) are unique. Do not scatter // the mask if M(:,j) is a dense vector, since in that case // the numeric phase accesses M(:,j) directly, not via Hf. if (mjnz > 0) { int8_t *GB_RESTRICT Hf = (int8_t *GB_RESTRICT) TaskList [taskid].Hf ; GB_SCATTER_M_j (mystart, myend, 1) ; } } else if (!M_dense_in_place) { //-------------------------------------------------------------- // phase1: fine hash task, C<M>=A*B or C<!M>=A*B //-------------------------------------------------------------- // If M_dense_in_place is true, this is skipped. The mask M // is dense, and is used in-place. // The least significant 2 bits of Hf [hash] is the flag f, and // the upper bits contain h, as (h,f). After this phase1, if // M(i,j)=1 then the hash table contains ((i+1),1) in Hf [hash] // at some location. // Later, the flag values of f = 2 and 3 are also used. // Only f=1 is set in this phase. 
// h == 0, f == 0: unoccupied and unlocked // h == i+1, f == 1: occupied with M(i,j)=1 int64_t *GB_RESTRICT Hf = (int64_t *GB_RESTRICT) TaskList [taskid].Hf ; int64_t hash_bits = (hash_size-1) ; // scan my M(:,j) for (int64_t pM = mystart ; pM < myend ; pM++) { GB_GET_M_ij (pM) ; // get M(i,j) if (!mij) continue ; // skip if M(i,j)=0 int64_t i = GBI (Mi, pM, mvlen) ; int64_t i_mine = ((i+1) << 2) + 1 ; // ((i+1),1) for (GB_HASH (i)) { int64_t hf ; // swap my hash entry into the hash table; // does the following using an atomic capture: // { hf = Hf [hash] ; Hf [hash] = i_mine ; } GB_ATOMIC_CAPTURE_INT64 (hf, Hf [hash], i_mine) ; if (hf == 0) break ; // success // i_mine has been inserted, but a prior entry was // already there. It needs to be replaced, so take // ownership of this displaced entry, and keep // looking until a new empty slot is found for it. i_mine = hf ; } } } } else { //------------------------------------------------------------------ // coarse tasks: compute nnz in each vector of A*B(:,kfirst:klast) //------------------------------------------------------------------ int64_t *GB_RESTRICT Hf = (int64_t *GB_RESTRICT) TaskList [taskid].Hf ; int64_t kfirst = TaskList [taskid].start ; int64_t klast = TaskList [taskid].end ; int64_t mark = 0 ; if (use_Gustavson) { //-------------------------------------------------------------- // phase1: coarse Gustavson task //-------------------------------------------------------------- if (M == NULL) { //---------------------------------------------------------- // phase1: coarse Gustavson task, C=A*B //---------------------------------------------------------- #define GB_SAXPY_COARSE_GUSTAVSON_NOMASK_PHASE1 #include "GB_meta16_factory.c" #undef GB_SAXPY_COARSE_GUSTAVSON_NOMASK_PHASE1 } else if (mask_is_M) { //---------------------------------------------------------- // phase1: coarse Gustavson task, C<M>=A*B //---------------------------------------------------------- #define GB_SAXPY_COARSE_GUSTAVSON_M_PHASE1 #include "GB_meta16_factory.c" #undef GB_SAXPY_COARSE_GUSTAVSON_M_PHASE1 } else { //---------------------------------------------------------- // phase1: coarse Gustavson task, C<!M>=A*B //---------------------------------------------------------- #define GB_SAXPY_COARSE_GUSTAVSON_NOTM_PHASE1 #include "GB_meta16_factory.c" #undef GB_SAXPY_COARSE_GUSTAVSON_NOTM_PHASE1 } } else { //-------------------------------------------------------------- // phase1: coarse hash task //-------------------------------------------------------------- int64_t *GB_RESTRICT Hi = TaskList [taskid].Hi ; int64_t hash_bits = (hash_size-1) ; if (M == NULL || ignore_mask) { //---------------------------------------------------------- // phase1: coarse hash task, C=A*B //---------------------------------------------------------- // no mask present, or mask ignored #undef GB_CHECK_MASK_ij #define GB_SAXPY_COARSE_HASH_PHASE1 #include "GB_meta16_factory.c" } else if (mask_is_M) { //---------------------------------------------------------- // phase1: coarse hash task, C<M>=A*B //---------------------------------------------------------- if (M_dense_in_place) { //------------------------------------------------------ // M(:,j) is dense. M is not scattered into Hf. //------------------------------------------------------ ASSERT (!Mask_struct || M_is_bitmap) ; #define GB_CHECK_MASK_ij \ bool mij = \ (M_is_bitmap ? Mjb [i] : 1) && \ (Mask_struct ? 
1 : (Mjx [i] != 0)) ; \ if (!mij) continue ; switch (msize) { default: case 1 : #undef M_TYPE #define M_TYPE uint8_t #undef M_SIZE #define M_SIZE 1 #include "GB_meta16_factory.c" break ; case 2 : #undef M_TYPE #define M_TYPE uint16_t #include "GB_meta16_factory.c" break ; case 4 : #undef M_TYPE #define M_TYPE uint32_t #include "GB_meta16_factory.c" break ; case 8 : #undef M_TYPE #define M_TYPE uint64_t #include "GB_meta16_factory.c" break ; case 16 : #undef M_TYPE #define M_TYPE uint64_t #undef M_SIZE #define M_SIZE 2 #undef GB_CHECK_MASK_ij #define GB_CHECK_MASK_ij \ bool mij = \ (M_is_bitmap ? Mjb [i] : 1) && \ (Mask_struct ? 1 : \ (Mjx [2*i] != 0) || \ (Mjx [2*i+1] != 0)) ; \ if (!mij) continue ; #include "GB_meta16_factory.c" break ; } #undef GB_SAXPY_COARSE_HASH_PHASE1 } else { //------------------------------------------------------ // M is sparse and scattered into Hf //------------------------------------------------------ #define GB_SAXPY_COARSE_HASH_M_PHASE1 #include "GB_meta16_factory.c" #undef GB_SAXPY_COARSE_HASH_M_PHASE1 } } else { //---------------------------------------------------------- // phase1: coarse hash task, C<!M>=A*B //---------------------------------------------------------- if (M_dense_in_place) { //------------------------------------------------------ // M(:,j) is dense. M is not scattered into Hf. //------------------------------------------------------ if (Mask_struct && !M_is_bitmap) { // structural mask, complemented, not bitmap. // No work to do; C is empty. for (int64_t kk = kfirst ; kk <= klast ; kk++) { Cp [kk] = 0 ; } continue ; } #define GB_SAXPY_COARSE_HASH_PHASE1 #undef GB_CHECK_MASK_ij #define GB_CHECK_MASK_ij \ bool mij = \ (M_is_bitmap ? Mjb [i] : 1) && \ (Mask_struct ? 1 : (Mjx [i] != 0)) ; \ if (mij) continue ; switch (msize) { default: case 1 : #undef M_TYPE #define M_TYPE uint8_t #undef M_SIZE #define M_SIZE 1 #include "GB_meta16_factory.c" break ; case 2 : #undef M_TYPE #define M_TYPE uint16_t #include "GB_meta16_factory.c" break ; case 4 : #undef M_TYPE #define M_TYPE uint32_t #include "GB_meta16_factory.c" break ; case 8 : #undef M_TYPE #define M_TYPE uint64_t #include "GB_meta16_factory.c" break ; case 16 : #undef M_TYPE #define M_TYPE uint64_t #undef M_SIZE #define M_SIZE 2 #undef GB_CHECK_MASK_ij #define GB_CHECK_MASK_ij \ bool mij = \ (M_is_bitmap ? Mjb [i] : 1) && \ (Mask_struct ? 1 : \ (Mjx [2*i] != 0) || \ (Mjx [2*i+1] != 0)) ; \ if (mij) continue ; #include "GB_meta16_factory.c" break ; } #undef GB_SAXPY_COARSE_HASH_PHASE1 } else { //------------------------------------------------------ // M is sparse and scattered into Hf //------------------------------------------------------ #define GB_SAXPY_COARSE_HASH_NOTM_PHASE1 #include "GB_meta16_factory.c" #undef GB_SAXPY_COARSE_HASH_NOTM_PHASE1 } } } } } //-------------------------------------------------------------------------- // check result for phase1 for fine tasks //-------------------------------------------------------------------------- #ifdef GB_DEBUG if (M != NULL) { for (taskid = 0 ; taskid < nfine ; taskid++) { int64_t kk = TaskList [taskid].vector ; ASSERT (kk >= 0 && kk < B->nvec) ; int64_t bjnz = (Bp == NULL) ? 
bvlen : (Bp [kk+1] - Bp [kk]) ; // no work to do if B(:,j) is empty if (bjnz == 0) continue ; int64_t hash_size = TaskList [taskid].hsize ; bool use_Gustavson = (hash_size == cvlen) ; int leader = TaskList [taskid].leader ; if (leader != taskid) continue ; GB_GET_M_j ; // get M(:,j) if (mjnz == 0) continue ; int64_t mjcount2 = 0 ; int64_t mjcount = 0 ; for (int64_t pM = pM_start ; pM < pM_end ; pM++) { GB_GET_M_ij (pM) ; // get M(i,j) if (mij) mjcount++ ; } if (use_Gustavson) { // phase1: fine Gustavson task, C<M>=A*B or C<!M>=A*B int8_t *GB_RESTRICT Hf = (int8_t *GB_RESTRICT) TaskList [taskid].Hf ; for (int64_t pM = pM_start ; pM < pM_end ; pM++) { GB_GET_M_ij (pM) ; // get M(i,j) int64_t i = GBI (Mi, pM, mvlen) ; ASSERT (Hf [i] == mij) ; } for (int64_t i = 0 ; i < cvlen ; i++) { ASSERT (Hf [i] == 0 || Hf [i] == 1) ; if (Hf [i] == 1) mjcount2++ ; } ASSERT (mjcount == mjcount2) ; } else if (!M_dense_in_place) { // phase1: fine hash task, C<M>=A*B or C<!M>=A*B // h == 0, f == 0: unoccupied and unlocked // h == i+1, f == 1: occupied with M(i,j)=1 int64_t *GB_RESTRICT Hf = (int64_t *GB_RESTRICT) TaskList [taskid].Hf ; int64_t hash_bits = (hash_size-1) ; for (int64_t pM = pM_start ; pM < pM_end ; pM++) { GB_GET_M_ij (pM) ; // get M(i,j) if (!mij) continue ; // skip if M(i,j)=0 int64_t i = GBI (Mi, pM, mvlen) ; int64_t i_mine = ((i+1) << 2) + 1 ; // ((i+1),1) int64_t probe = 0 ; for (GB_HASH (i)) { int64_t hf = Hf [hash] ; if (hf == i_mine) { mjcount2++ ; break ; } ASSERT (hf != 0) ; probe++ ; ASSERT (probe < cvlen) ; } } ASSERT (mjcount == mjcount2) ; mjcount2 = 0 ; for (int64_t hash = 0 ; hash < hash_size ; hash++) { int64_t hf = Hf [hash] ; int64_t h = (hf >> 2) ; // empty (0), or a 1-based int64_t f = (hf & 3) ; // 0 if empty or 1 if occupied if (f == 1) ASSERT (h >= 1 && h <= cvlen) ; ASSERT (hf == 0 || f == 1) ; if (f == 1) mjcount2++ ; } ASSERT (mjcount == mjcount2) ; } } } #endif }
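/*
 * A self-contained sketch of the lock-free hash insertion used by the fine
 * hash tasks in phase1 above: a thread swaps its entry into a slot with an
 * OpenMP atomic capture; if it displaced a previous occupant, it adopts that
 * entry and keeps probing until an empty slot takes it. Table size, hash
 * function, and probing are simplified stand-ins for GB_HASH and
 * GB_ATOMIC_CAPTURE_INT64; keys must be distinct, just as the row indices
 * within one M(:,j) are.
 */
#include <stdio.h>
#include <stdint.h>

#define HSIZE 64                        /* power of two */
#define HASH(i) (((i) * 107) & (HSIZE - 1))

int64_t Hf [HSIZE] ;                    /* 0 = empty, otherwise key+1 */

int main (void)
{
    int64_t keys [ ] = { 3, 7, 7 + HSIZE, 3 + 2 * HSIZE, 11 } ;
    int nkeys = 5 ;

    #pragma omp parallel for
    for (int k = 0 ; k < nkeys ; k++)
    {
        int64_t mine = keys [k] + 1 ;   /* 1-based so that 0 can mean "empty" */
        int64_t hash = HASH (keys [k]) ;
        while (1)
        {
            int64_t old ;
            /* atomic capture: swap mine into the slot, remember what was there */
            #pragma omp atomic capture
            { old = Hf [hash] ; Hf [hash] = mine ; }
            if (old == 0) break ;       /* slot was empty: insertion done */
            mine = old ;                /* adopt the displaced entry, keep probing */
            hash = (hash + 1) & (HSIZE - 1) ;
        }
    }

    for (int h = 0 ; h < HSIZE ; h++)
    {
        if (Hf [h] != 0) printf ("slot %2d: key %lld\n", h, (long long) (Hf [h] - 1)) ;
    }
    return (0) ;
}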
accuracy_cython.c
/* Generated by Cython 0.29.23 */ /* BEGIN: Cython Metadata { "distutils": { "extra_compile_args": [ "-fopenmp", "-ffast-math" ], "extra_link_args": [ "-fopenmp" ], "name": "glove.metrics.accuracy_cython", "sources": [ "glove/metrics/accuracy_cython.pyx" ] }, "module_name": "glove.metrics.accuracy_cython" } END: Cython Metadata */ #ifndef PY_SSIZE_T_CLEAN #define PY_SSIZE_T_CLEAN #endif /* PY_SSIZE_T_CLEAN */ #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) #error Cython requires Python 2.6+ or Python 3.3+. #else #define CYTHON_ABI "0_29_23" #define CYTHON_HEX_VERSION 0x001D17F0 #define CYTHON_FUTURE_DIVISION 0 #include <stddef.h> #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #define __PYX_COMMA , #ifndef HAVE_LONG_LONG #if PY_VERSION_HEX >= 0x02070000 #define HAVE_LONG_LONG #endif #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 0 #undef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 0 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #if PY_VERSION_HEX < 0x03050000 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #undef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #undef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 1 #undef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 0 #undef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 0 #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #elif defined(PYSTON_VERSION) #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef 
CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) #define CYTHON_USE_PYTYPE_LOOKUP 1 #endif #if PY_MAJOR_VERSION < 3 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #elif !defined(CYTHON_USE_PYLONG_INTERNALS) #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #ifndef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 1 #endif #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #if PY_VERSION_HEX < 0x030300F0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #elif !defined(CYTHON_USE_UNICODE_WRITER) #define CYTHON_USE_UNICODE_WRITER 1 #endif #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #ifndef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 1 #endif #ifndef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 1 #endif #ifndef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) #endif #ifndef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) #endif #ifndef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) #endif #ifndef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) #endif #endif #if !defined(CYTHON_FAST_PYCCALL) #define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) #endif #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #ifdef SIZEOF_VOID_P enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; #endif #endif #ifndef __has_attribute #define __has_attribute(x) 0 #endif #ifndef __has_cpp_attribute #define __has_cpp_attribute(x) 0 #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_MAYBE_UNUSED_VAR # if defined(__cplusplus) 
template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } # else # define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifdef _MSC_VER #ifndef _MSC_STDINT_H_ #if _MSC_VER < 1300 typedef unsigned char uint8_t; typedef unsigned int uint32_t; #else typedef unsigned __int8 uint8_t; typedef unsigned __int32 uint32_t; #endif #endif #else #include <stdint.h> #endif #ifndef CYTHON_FALLTHROUGH #if defined(__cplusplus) && __cplusplus >= 201103L #if __has_cpp_attribute(fallthrough) #define CYTHON_FALLTHROUGH [[fallthrough]] #elif __has_cpp_attribute(clang::fallthrough) #define CYTHON_FALLTHROUGH [[clang::fallthrough]] #elif __has_cpp_attribute(gnu::fallthrough) #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] #endif #endif #ifndef CYTHON_FALLTHROUGH #if __has_attribute(fallthrough) #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) #else #define CYTHON_FALLTHROUGH #endif #endif #if defined(__clang__ ) && defined(__apple_build_version__) #if __apple_build_version__ < 7000000 #undef CYTHON_FALLTHROUGH #define CYTHON_FALLTHROUGH #endif #endif #endif #ifndef CYTHON_INLINE #if defined(__clang__) #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) #elif defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2 #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #else #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #endif #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #ifndef METH_STACKLESS #define METH_STACKLESS 0 #endif #if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) #ifndef METH_FASTCALL #define METH_FASTCALL 0x80 #endif typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames); #else #define __Pyx_PyCFunctionFast _PyCFunctionFast #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords #endif #if CYTHON_FAST_PYCCALL #define __Pyx_PyFastCFunction_Check(func)\ 
((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) #else #define __Pyx_PyFastCFunction_Check(func) 0 #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 #define PyMem_RawMalloc(n) PyMem_Malloc(n) #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) #define PyMem_RawFree(p) PyMem_Free(p) #endif #if CYTHON_COMPILING_IN_PYSTON #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) #else #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) #endif #if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #elif PY_VERSION_HEX >= 0x03060000 #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() #elif PY_VERSION_HEX >= 0x03000000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #else #define __Pyx_PyThreadState_Current _PyThreadState_Current #endif #if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) #include "pythread.h" #define Py_tss_NEEDS_INIT 0 typedef int Py_tss_t; static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { *key = PyThread_create_key(); return 0; } static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); *key = Py_tss_NEEDS_INIT; return key; } static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { PyObject_Free(key); } static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { return *key != Py_tss_NEEDS_INIT; } static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { PyThread_delete_key(*key); *key = Py_tss_NEEDS_INIT; } static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { return PyThread_set_key_value(*key, value); } static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { return PyThread_get_key_value(*key); } #endif #if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) #define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n)) #else #define __Pyx_PyDict_NewPresized(n) PyDict_New() #endif #if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS #define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) #else #define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) #endif #else #define CYTHON_PEP393_ENABLED 0 #define PyUnicode_1BYTE_KIND 1 #define PyUnicode_2BYTE_KIND 2 #define PyUnicode_4BYTE_KIND 4 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #ifndef PyObject_Unicode #define PyObject_Unicode PyObject_Str #endif #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #if PY_VERSION_HEX >= 0x030900A4 #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt) #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size) #else #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size) #endif #if CYTHON_ASSUME_SAFE_MACROS #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) #else #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) #endif #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #if CYTHON_USE_ASYNC_SLOTS #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #else #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #endif #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef __Pyx_PyAsyncMethodsStruct typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) #define __Pyx_truncl trunc #else #define __Pyx_truncl truncl #endif #define __PYX_MARK_ERR_POS(f_index, lineno) \ { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; } #define __PYX_ERR(f_index, lineno, Ln_error) \ { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; } #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__glove__metrics__accuracy_cython #define __PYX_HAVE_API__glove__metrics__accuracy_cython /* Early includes */ #include "pythread.h" #include <string.h> #include <stdlib.h> #include <stdio.h> #include "pystate.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) #define CYTHON_WITHOUT_ASSERTIONS #endif typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { return (size_t) i < (size_t) limit; } #if defined (__cplusplus) && __cplusplus >= 201103L #include <cstdlib> #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define 
__Pyx_sst_abs(value) ((value<0) ? -value : value) #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); #define __Pyx_PySequence_Tuple(obj)\ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_ASSUME_SAFE_MACROS #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? 
__Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } static PyObject *__pyx_m = NULL; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_cython_runtime = NULL; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static PyObject *__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= 
__FILE__;
static const char *__pyx_filename;
static const char *__pyx_f[] = {
  "glove/metrics/accuracy_cython.pyx",
  "stringsource",
};

/* NoFastGil.proto */
#define __Pyx_PyGILState_Ensure PyGILState_Ensure
#define __Pyx_PyGILState_Release PyGILState_Release
#define __Pyx_FastGIL_Remember()
#define __Pyx_FastGIL_Forget()
#define __Pyx_FastGilFuncInit()

/* MemviewSliceStruct.proto */
struct __pyx_memoryview_obj;
typedef struct {
  struct __pyx_memoryview_obj *memview;
  char *data;
  Py_ssize_t shape[8];
  Py_ssize_t strides[8];
  Py_ssize_t suboffsets[8];
} __Pyx_memviewslice;
#define __Pyx_MemoryView_Len(m)  (m.shape[0])

/* Atomics.proto */
#include <pythread.h>
#ifndef CYTHON_ATOMICS
    #define CYTHON_ATOMICS 1
#endif
#define __pyx_atomic_int_type int
/* The predefined GCC macro is __GNUC_PATCHLEVEL__; the bare
   __GNUC_PATCHLEVEL spelling emitted by older Cython evaluates to 0 inside
   #if and would needlessly exclude GCC 4.1.2+ from the atomics fast path. */
#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\
                    (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ >= 2)) &&\
                    !defined(__i386__)
    #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1)
    #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1)
    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Using GNU atomics"
    #endif
#elif CYTHON_ATOMICS && defined(_MSC_VER) && 0
    #include <Windows.h>
    #undef __pyx_atomic_int_type
    #define __pyx_atomic_int_type LONG
    #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value)
    #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value)
    #ifdef __PYX_DEBUG_ATOMICS
        #pragma message ("Using MSVC atomics")
    #endif
#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0
    #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value)
    #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value)
    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Using Intel atomics"
    #endif
#else
    #undef CYTHON_ATOMICS
    #define CYTHON_ATOMICS 0
    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Not using atomics"
    #endif
#endif
typedef volatile __pyx_atomic_int_type __pyx_atomic_int;
#if CYTHON_ATOMICS
    #define __pyx_add_acquisition_count(memview)\
             __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
    #define __pyx_sub_acquisition_count(memview)\
            __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#else
    #define __pyx_add_acquisition_count(memview)\
             __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
    #define __pyx_sub_acquisition_count(memview)\
            __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#endif

/* ForceInitThreads.proto */
#ifndef __PYX_FORCE_INIT_THREADS
  #define __PYX_FORCE_INIT_THREADS 0
#endif

/* BufferFormatStructs.proto */
#define IS_UNSIGNED(type) (((type) -1) > 0)
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
typedef struct {
  const char* name;
  struct __Pyx_StructField_* fields;
  size_t size;
  size_t arraysize[8];
  int ndim;
  char typegroup;
  char is_unsigned;
  int flags;
} __Pyx_TypeInfo;
typedef struct __Pyx_StructField_ {
  __Pyx_TypeInfo* type;
  const char* name;
  size_t offset;
} __Pyx_StructField;
typedef struct {
  __Pyx_StructField* field;
  size_t parent_offset;
} __Pyx_BufFmt_StackElem;
typedef struct {
  __Pyx_StructField root;
  __Pyx_BufFmt_StackElem* head;
  size_t fmt_offset;
  size_t new_count, enc_count;
  size_t struct_alignment;
  int is_complex;
  char enc_type;
  char new_packmode;
  char enc_packmode;
  char is_valid_array;
} __Pyx_BufFmt_Context;

/*--- Type declarations ---*/
struct __pyx_array_obj;
struct __pyx_MemviewEnum_obj;
struct __pyx_memoryview_obj;
struct __pyx_memoryviewslice_obj;

/* "View.MemoryView":105
 *
* @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_array_obj { PyObject_HEAD struct __pyx_vtabstruct_array *__pyx_vtab; char *data; Py_ssize_t len; char *format; int ndim; Py_ssize_t *_shape; Py_ssize_t *_strides; Py_ssize_t itemsize; PyObject *mode; PyObject *_format; void (*callback_free_data)(void *); int free_data; int dtype_is_object; }; /* "View.MemoryView":279 * * @cname('__pyx_MemviewEnum') * cdef class Enum(object): # <<<<<<<<<<<<<< * cdef object name * def __init__(self, name): */ struct __pyx_MemviewEnum_obj { PyObject_HEAD PyObject *name; }; /* "View.MemoryView":330 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_memoryview_obj { PyObject_HEAD struct __pyx_vtabstruct_memoryview *__pyx_vtab; PyObject *obj; PyObject *_size; PyObject *_array_interface; PyThread_type_lock lock; __pyx_atomic_int acquisition_count[2]; __pyx_atomic_int *acquisition_count_aligned_p; Py_buffer view; int flags; int dtype_is_object; __Pyx_TypeInfo *typeinfo; }; /* "View.MemoryView":965 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_memoryviewslice_obj { struct __pyx_memoryview_obj __pyx_base; __Pyx_memviewslice from_slice; PyObject *from_object; PyObject *(*to_object_func)(char *); int (*to_dtype_func)(char *, PyObject *); }; /* "View.MemoryView":105 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_vtabstruct_array { PyObject *(*get_memview)(struct __pyx_array_obj *); }; static struct __pyx_vtabstruct_array *__pyx_vtabptr_array; /* "View.MemoryView":330 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_vtabstruct_memoryview { char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *); PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *); }; static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview; /* "View.MemoryView":965 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_vtabstruct__memoryviewslice { struct __pyx_vtabstruct_memoryview __pyx_base; }; static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define 
__Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* PyObjectGetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* RaiseArgTupleInvalid.proto */ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /* RaiseDoubleKeywords.proto */ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /* ParseKeywords.proto */ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ const char* function_name); /* MemviewSliceInit.proto */ #define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d #define __Pyx_MEMVIEW_DIRECT 1 #define __Pyx_MEMVIEW_PTR 2 #define __Pyx_MEMVIEW_FULL 4 #define __Pyx_MEMVIEW_CONTIG 8 #define __Pyx_MEMVIEW_STRIDED 16 #define __Pyx_MEMVIEW_FOLLOW 32 #define __Pyx_IS_C_CONTIG 1 #define __Pyx_IS_F_CONTIG 2 static int __Pyx_init_memviewslice( struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference); static CYTHON_INLINE int __pyx_add_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); static CYTHON_INLINE int __pyx_sub_acquisition_count_locked( __pyx_atomic_int *acquisition_count, 
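/*
 * Slice reference counting: every __Pyx_memviewslice that is handed out
 * bumps the owning memoryview's acquisition count, atomically when
 * CYTHON_ATOMICS is enabled and otherwise under the per-object PyThread
 * lock.  The locked fallback declared here behaves roughly like the sketch
 * below (based on Cython's stock utility code; kept inside a comment so
 * this translation unit is unchanged):
 *
 *     static CYTHON_INLINE int __pyx_add_acquisition_count_locked(
 *             __pyx_atomic_int *acquisition_count, PyThread_type_lock lock)
 *     {
 *         int result;
 *         PyThread_acquire_lock(lock, 1);
 *         result = (*acquisition_count)++;
 *         PyThread_release_lock(lock);
 *         return result;
 *     }
 */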
PyThread_type_lock lock); #define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p) #define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview)) #define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__) #define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__) static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int); static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int); /* ArgTypeTest.proto */ #define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\ __Pyx__ArgTypeTest(obj, type, name, exact)) static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* PyThreadStateGet.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; #define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #define __Pyx_PyErr_Occurred() PyErr_Occurred() #endif /* PyErrFetchRestore.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) #else #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #endif #else #define __Pyx_PyErr_Clear() PyErr_Clear() #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* PyCFunctionFastCall.proto */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); #else #define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) #endif /* PyFunctionFastCall.proto */ #if CYTHON_FAST_PYCALL #define __Pyx_PyFunction_FastCall(func, args, nargs)\ __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) #if 1 || 
PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); #else #define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) #endif #define __Pyx_BUILD_ASSERT_EXPR(cond)\ (sizeof(char [1 - 2*!(cond)]) - 1) #ifndef Py_MEMBER_SIZE #define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) #endif static size_t __pyx_pyframe_localsplus_offset = 0; #include "frameobject.h" #define __Pxy_PyFrame_Initialize_Offsets()\ ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) #define __Pyx_PyFrame_GetLocalsplus(frame)\ (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) #endif /* PyObjectCall2Args.proto */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); /* PyObjectCallMethO.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); #endif /* PyObjectCallOneArg.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); /* IncludeStringH.proto */ #include <string.h> /* BytesEquals.proto */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); /* UnicodeEquals.proto */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); /* StrEquals.proto */ #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals #else #define __Pyx_PyString_Equals __Pyx_PyBytes_Equals #endif /* UnaryNegOverflows.proto */ #define UNARY_NEG_WOULD_OVERFLOW(x)\ (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/ /* GetAttr.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); /* GetItemInt.proto */ #define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ (is_list ? 
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ __Pyx_GetItemInt_Generic(o, to_py_func(i)))) #define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); #define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, int wraparound, int boundscheck); /* ObjectGetItem.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key); #else #define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) #endif /* decode_c_string_utf16.proto */ static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 0; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = -1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } /* decode_c_string.proto */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); /* PyErrExceptionMatches.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); #else #define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) #endif /* GetAttr3.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); /* PyDictVersioning.proto */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS #define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) #define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ (version_var) = __PYX_GET_DICT_VERSION(dict);\ (cache_var) = (value); #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ (VAR) = __pyx_dict_cached_value;\ } else {\ (VAR) = __pyx_dict_cached_value = (LOOKUP);\ __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ }\ } static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); static CYTHON_INLINE PY_UINT64_T 
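/*
 * Dict-version caching: on CPython builds with CYTHON_USE_DICT_VERSIONS,
 * a dict's ma_version_tag changes whenever the dict is mutated, so a
 * module-global lookup can be cached and revalidated with one integer
 * compare.  The __Pyx_GetModuleGlobalName macro defined just below expands
 * to roughly this pattern (illustrative restatement only):
 *
 *     static PY_UINT64_T version = 0;  static PyObject *cached = NULL;
 *     if (version == __PYX_GET_DICT_VERSION(__pyx_d) && cached)
 *         var = __Pyx_NewRef(cached);          // fast path: dict unchanged
 *     else
 *         var = __Pyx__GetModuleGlobalName(name, &version, &cached);
 */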
__Pyx_get_object_dict_version(PyObject *obj); static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); #else #define __PYX_GET_DICT_VERSION(dict) (0) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); #endif /* GetModuleGlobalName.proto */ #if CYTHON_USE_DICT_VERSIONS #define __Pyx_GetModuleGlobalName(var, name) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } #define __Pyx_GetModuleGlobalNameUncached(var, name) {\ PY_UINT64_T __pyx_dict_version;\ PyObject *__pyx_dict_cached_value;\ (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); #else #define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) #define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); #endif /* RaiseTooManyValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); /* RaiseNeedMoreValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); /* RaiseNoneIterError.proto */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); /* ExtTypeTest.proto */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /* GetTopmostException.proto */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); #endif /* SaveResetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); #else #define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) #define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) #endif /* GetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); #endif /* SwapException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); #endif /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* FastTypeChecks.proto 
*/ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); #else #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) #define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) #endif #define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ /* ListCompAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len)) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); __Pyx_SET_SIZE(list, len + 1); return 0; } return PyList_Append(list, x); } #else #define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) #endif /* PyIntBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); #else #define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\ (inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) #endif /* ListExtend.proto */ static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) { #if CYTHON_COMPILING_IN_CPYTHON PyObject* none = _PyList_Extend((PyListObject*)L, v); if (unlikely(!none)) return -1; Py_DECREF(none); return 0; #else return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v); #endif } /* ListAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); __Pyx_SET_SIZE(list, len + 1); return 0; } return PyList_Append(list, x); } #else #define __Pyx_PyList_Append(L,x) PyList_Append(L,x) #endif /* None.proto */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname); /* ImportFrom.proto */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); /* HasAttr.proto */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *); /* PyObject_GenericGetAttrNoDict.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr #endif /* PyObject_GenericGetAttr.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr #endif /* SetVTable.proto */ static int __Pyx_SetVtable(PyObject *dict, void *vtable); /* PyObjectGetAttrStrNoError.proto */ static CYTHON_INLINE PyObject* 
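/*
 * The __Pyx_PyList_Append fast path earlier in this block writes into a
 * list's spare capacity directly (PyList_SET_ITEM + __Pyx_SET_SIZE) instead
 * of calling PyList_Append.  The extra `len > (L->allocated >> 1)` guard
 * appears to route appends to mostly-empty (heavily over-allocated) lists
 * back through PyList_Append and its resize heuristics; a reading of the
 * stock utility code, behavior is identical either way.
 */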
__Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name); /* SetupReduce.proto */ static int __Pyx_setup_reduce(PyObject* type_obj); /* CLineInTraceback.proto */ #ifdef CYTHON_CLINE_IN_TRACEBACK #define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0) #else static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); #endif /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif /* BufferStructDeclare.proto */ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; /* MemviewSliceIsContig.proto */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim); /* OverlappingSlices.proto */ static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize); /* Capsule.proto */ static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig); /* IsLittleEndian.proto */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); /* BufferFormatCheck.proto */ static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type); /* TypeInfoCompare.proto */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b); /* MemviewSliceValidateAndInit.proto */ static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *, int writable_flag); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_double(PyObject *, int writable_flag); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_int(PyObject *, int writable_flag); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_int(PyObject *, int writable_flag); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *, int writable_flag); /* GCCDiagnostics.proto */ #if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) #define __Pyx_HAS_GCC_DIAGNOSTIC #endif /* MemviewSliceCopyTemplate.proto */ static __Pyx_memviewslice 
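/*
 * The __Pyx_PyObject_to_MemoryviewSlice_* converters above are generated
 * once per buffer signature; the suffix appears to spell the axis specs of
 * the memoryview type ('d' direct, 'c' contiguous, 's' strided), e.g.:
 *
 *     _d_dc_double  <->  double[:, ::1]   (2-D, C-contiguous rows)
 *     _dc_double    <->  double[::1]      (1-D contiguous)
 *     _ds_int       <->  int[:]           (1-D strided)
 *
 * which presumably matches the double- and int-typed arguments of
 * compute_rank_violations further down.
 */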
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object); /* CIntFromPy.proto */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* CIntFromPy.proto */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* CIntFromPy.proto */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *); /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ /* Module declarations from 'glove.metrics.accuracy_cython' */ static PyTypeObject *__pyx_array_type = 0; static PyTypeObject *__pyx_MemviewEnum_type = 0; static PyTypeObject *__pyx_memoryview_type = 0; static PyTypeObject *__pyx_memoryviewslice_type = 0; static PyObject *generic = 0; static PyObject *strided = 0; static PyObject *indirect = 0; static PyObject *contiguous = 0; static PyObject *indirect_contiguous = 0; static int __pyx_memoryview_thread_locks_used; static PyThread_type_lock __pyx_memoryview_thread_locks[8]; static double __pyx_f_5glove_7metrics_15accuracy_cython_dot(__Pyx_memviewslice, __Pyx_memviewslice, int); /*proto*/ static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/ static void *__pyx_align_pointer(void *, size_t); /*proto*/ static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/ static PyObject *_unellipsify(PyObject *, int); /*proto*/ static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, 
int); /*proto*/ static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/ static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/ static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/ static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/ static int __pyx_memoryview_err(PyObject *, char *); /*proto*/ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/ static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/ static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_int = { "int", NULL, sizeof(int), { 0 }, 0, IS_UNSIGNED(int) ? 
'U' : 'I', IS_UNSIGNED(int), 0 }; #define __Pyx_MODULE_NAME "glove.metrics.accuracy_cython" extern int __pyx_module_is_main_glove__metrics__accuracy_cython; int __pyx_module_is_main_glove__metrics__accuracy_cython = 0; /* Implementation of 'glove.metrics.accuracy_cython' */ static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_MemoryError; static PyObject *__pyx_builtin_enumerate; static PyObject *__pyx_builtin_TypeError; static PyObject *__pyx_builtin_Ellipsis; static PyObject *__pyx_builtin_id; static PyObject *__pyx_builtin_IndexError; static const char __pyx_k_O[] = "O"; static const char __pyx_k_c[] = "c"; static const char __pyx_k_i[] = "i"; static const char __pyx_k_j[] = "j"; static const char __pyx_k_k[] = "k"; static const char __pyx_k_id[] = "id"; static const char __pyx_k_new[] = "__new__"; static const char __pyx_k_obj[] = "obj"; static const char __pyx_k_base[] = "base"; static const char __pyx_k_dict[] = "__dict__"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_mode[] = "mode"; static const char __pyx_k_name[] = "name"; static const char __pyx_k_ndim[] = "ndim"; static const char __pyx_k_pack[] = "pack"; static const char __pyx_k_size[] = "size"; static const char __pyx_k_step[] = "step"; static const char __pyx_k_stop[] = "stop"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_ASCII[] = "ASCII"; static const char __pyx_k_class[] = "__class__"; static const char __pyx_k_error[] = "error"; static const char __pyx_k_flags[] = "flags"; static const char __pyx_k_input[] = "input"; static const char __pyx_k_range[] = "range"; static const char __pyx_k_score[] = "score"; static const char __pyx_k_shape[] = "shape"; static const char __pyx_k_start[] = "start"; static const char __pyx_k_encode[] = "encode"; static const char __pyx_k_format[] = "format"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_inputs[] = "inputs"; static const char __pyx_k_name_2[] = "__name__"; static const char __pyx_k_pickle[] = "pickle"; static const char __pyx_k_reduce[] = "__reduce__"; static const char __pyx_k_struct[] = "struct"; static const char __pyx_k_unpack[] = "unpack"; static const char __pyx_k_update[] = "update"; static const char __pyx_k_fortran[] = "fortran"; static const char __pyx_k_memview[] = "memview"; static const char __pyx_k_wordvec[] = "wordvec"; static const char __pyx_k_Ellipsis[] = "Ellipsis"; static const char __pyx_k_expected[] = "expected"; static const char __pyx_k_getstate[] = "__getstate__"; static const char __pyx_k_itemsize[] = "itemsize"; static const char __pyx_k_pyx_type[] = "__pyx_type"; static const char __pyx_k_setstate[] = "__setstate__"; static const char __pyx_k_TypeError[] = "TypeError"; static const char __pyx_k_enumerate[] = "enumerate"; static const char __pyx_k_pyx_state[] = "__pyx_state"; static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; static const char __pyx_k_skip_word[] = "skip_word"; static const char __pyx_k_IndexError[] = "IndexError"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_no_threads[] = "no_threads"; static const char __pyx_k_no_wordvec[] = "no_wordvec"; static const char __pyx_k_pyx_result[] = "__pyx_result"; static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; static const char __pyx_k_violations[] = "violations"; static const char __pyx_k_MemoryError[] = "MemoryError"; static const char __pyx_k_PickleError[] = "PickleError"; static const char 
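/*
 * Naming scheme for the literal tables in this block: the __pyx_k_* arrays
 * are raw C character data, and the matching PyObject pointers declared
 * after them are the runtime string objects -- by Cython convention
 * __pyx_n_s_* / __pyx_n_u_* / __pyx_n_b_* are interned identifier
 * str/unicode/bytes objects and __pyx_kp_s_* are plain string constants,
 * typically created once at module init by __Pyx_InitStrings().
 */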
__pyx_k_pyx_checksum[] = "__pyx_checksum"; static const char __pyx_k_stringsource[] = "stringsource"; static const char __pyx_k_wordvec_norm[] = "wordvec_norm"; static const char __pyx_k_no_components[] = "no_components"; static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; static const char __pyx_k_reduce_cython[] = "__reduce_cython__"; static const char __pyx_k_View_MemoryView[] = "View.MemoryView"; static const char __pyx_k_allocate_buffer[] = "allocate_buffer"; static const char __pyx_k_dtype_is_object[] = "dtype_is_object"; static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError"; static const char __pyx_k_rank_violations[] = "rank_violations"; static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; static const char __pyx_k_no_input_vectors[] = "no_input_vectors"; static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum"; static const char __pyx_k_score_of_expected[] = "score_of_expected"; static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; static const char __pyx_k_strided_and_direct[] = "<strided and direct>"; static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>"; static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>"; static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>"; static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>"; static const char __pyx_k_compute_rank_violations[] = "compute_rank_violations"; static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>"; static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'"; static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d."; static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array"; static const char __pyx_k_glove_metrics_accuracy_cython[] = "glove.metrics.accuracy_cython"; static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data."; static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>"; static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides"; static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory."; static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview"; static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview"; static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array"; static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))"; static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported"; static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s"; static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)"; static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object"; static const char __pyx_k_glove_metrics_accuracy_cython_py[] = "glove/metrics/accuracy_cython.pyx"; static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)"; static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial 
__cinit__"; static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides."; static PyObject *__pyx_n_s_ASCII; static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri; static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is; static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor; static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi; static PyObject *__pyx_kp_s_Cannot_index_with_type_s; static PyObject *__pyx_n_s_Ellipsis; static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr; static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0; static PyObject *__pyx_n_s_IndexError; static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte; static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr; static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d; static PyObject *__pyx_n_s_MemoryError; static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x; static PyObject *__pyx_kp_s_MemoryView_of_r_object; static PyObject *__pyx_n_b_O; static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a; static PyObject *__pyx_n_s_PickleError; static PyObject *__pyx_n_s_TypeError; static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_View_MemoryView; static PyObject *__pyx_n_s_allocate_buffer; static PyObject *__pyx_n_s_base; static PyObject *__pyx_n_s_c; static PyObject *__pyx_n_u_c; static PyObject *__pyx_n_s_class; static PyObject *__pyx_n_s_cline_in_traceback; static PyObject *__pyx_n_s_compute_rank_violations; static PyObject *__pyx_kp_s_contiguous_and_direct; static PyObject *__pyx_kp_s_contiguous_and_indirect; static PyObject *__pyx_n_s_dict; static PyObject *__pyx_n_s_dtype_is_object; static PyObject *__pyx_n_s_encode; static PyObject *__pyx_n_s_enumerate; static PyObject *__pyx_n_s_error; static PyObject *__pyx_n_s_expected; static PyObject *__pyx_n_s_flags; static PyObject *__pyx_n_s_format; static PyObject *__pyx_n_s_fortran; static PyObject *__pyx_n_u_fortran; static PyObject *__pyx_n_s_getstate; static PyObject *__pyx_n_s_glove_metrics_accuracy_cython; static PyObject *__pyx_kp_s_glove_metrics_accuracy_cython_py; static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi; static PyObject *__pyx_n_s_i; static PyObject *__pyx_n_s_id; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_input; static PyObject *__pyx_n_s_inputs; static PyObject *__pyx_n_s_itemsize; static PyObject *__pyx_kp_s_itemsize_0_for_cython_array; static PyObject *__pyx_n_s_j; static PyObject *__pyx_n_s_k; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_memview; static PyObject *__pyx_n_s_mode; static PyObject *__pyx_n_s_name; static PyObject *__pyx_n_s_name_2; static PyObject *__pyx_n_s_ndim; static PyObject *__pyx_n_s_new; static PyObject *__pyx_n_s_no_components; static PyObject *__pyx_kp_s_no_default___reduce___due_to_non; static PyObject *__pyx_n_s_no_input_vectors; static PyObject *__pyx_n_s_no_threads; static PyObject *__pyx_n_s_no_wordvec; static PyObject *__pyx_n_s_obj; static PyObject *__pyx_n_s_pack; static PyObject *__pyx_n_s_pickle; static PyObject *__pyx_n_s_pyx_PickleError; static PyObject *__pyx_n_s_pyx_checksum; static PyObject *__pyx_n_s_pyx_getbuffer; static PyObject *__pyx_n_s_pyx_result; static PyObject *__pyx_n_s_pyx_state; static PyObject *__pyx_n_s_pyx_type; static PyObject *__pyx_n_s_pyx_unpickle_Enum; static PyObject *__pyx_n_s_pyx_vtable; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_rank_violations; static PyObject 
*__pyx_n_s_reduce; static PyObject *__pyx_n_s_reduce_cython; static PyObject *__pyx_n_s_reduce_ex; static PyObject *__pyx_n_s_score; static PyObject *__pyx_n_s_score_of_expected; static PyObject *__pyx_n_s_setstate; static PyObject *__pyx_n_s_setstate_cython; static PyObject *__pyx_n_s_shape; static PyObject *__pyx_n_s_size; static PyObject *__pyx_n_s_skip_word; static PyObject *__pyx_n_s_start; static PyObject *__pyx_n_s_step; static PyObject *__pyx_n_s_stop; static PyObject *__pyx_kp_s_strided_and_direct; static PyObject *__pyx_kp_s_strided_and_direct_or_indirect; static PyObject *__pyx_kp_s_strided_and_indirect; static PyObject *__pyx_kp_s_stringsource; static PyObject *__pyx_n_s_struct; static PyObject *__pyx_n_s_test; static PyObject *__pyx_kp_s_unable_to_allocate_array_data; static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; static PyObject *__pyx_n_s_unpack; static PyObject *__pyx_n_s_update; static PyObject *__pyx_n_s_violations; static PyObject *__pyx_n_s_wordvec; static PyObject *__pyx_n_s_wordvec_norm; static PyObject *__pyx_pf_5glove_7metrics_15accuracy_cython_compute_rank_violations(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_wordvec, __Pyx_memviewslice __pyx_v_wordvec_norm, __Pyx_memviewslice __pyx_v_input, __Pyx_memviewslice __pyx_v_expected, __Pyx_memviewslice __pyx_v_inputs, __Pyx_memviewslice __pyx_v_rank_violations, CYTHON_UNUSED int __pyx_v_no_threads); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject 
*__pyx_v___pyx_state); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static void 
__pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_int_0; static PyObject *__pyx_int_1; static PyObject *__pyx_int_184977713; static PyObject *__pyx_int_neg_1; static PyObject *__pyx_tuple_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__9; static PyObject *__pyx_slice__15; static PyObject *__pyx_tuple__10; static PyObject *__pyx_tuple__11; static PyObject *__pyx_tuple__12; static PyObject *__pyx_tuple__13; static PyObject *__pyx_tuple__14; static PyObject *__pyx_tuple__16; static PyObject *__pyx_tuple__17; static PyObject *__pyx_tuple__18; static PyObject *__pyx_tuple__19; static PyObject *__pyx_tuple__21; static PyObject *__pyx_tuple__22; static PyObject *__pyx_tuple__23; static PyObject *__pyx_tuple__24; static PyObject *__pyx_tuple__25; static PyObject *__pyx_tuple__26; static PyObject *__pyx_codeobj__20; static PyObject *__pyx_codeobj__27; /* Late includes */ /* "glove/metrics/accuracy_cython.pyx":7 * * * cdef double dot(double[::1] x, # <<<<<<<<<<<<<< * double[::1] y, * int dim) nogil: */ static double __pyx_f_5glove_7metrics_15accuracy_cython_dot(__Pyx_memviewslice __pyx_v_x, __Pyx_memviewslice __pyx_v_y, int __pyx_v_dim) { int __pyx_v_i; double __pyx_v_result; double __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; /* "glove/metrics/accuracy_cython.pyx":12 * * cdef int i * cdef double result = 0.0 # <<<<<<<<<<<<<< * * for i in range(dim): */ __pyx_v_result = 0.0; /* "glove/metrics/accuracy_cython.pyx":14 * cdef double result = 0.0 * * for i in range(dim): # <<<<<<<<<<<<<< * result += x[i] * y[i] * */ __pyx_t_1 = __pyx_v_dim; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "glove/metrics/accuracy_cython.pyx":15 * * for i in range(dim): * result += x[i] * y[i] # <<<<<<<<<<<<<< * * return result */ __pyx_t_4 = __pyx_v_i; __pyx_t_5 = __pyx_v_i; __pyx_v_result = (__pyx_v_result + ((*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_x.data) + __pyx_t_4)) ))) * (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_y.data) + __pyx_t_5)) ))))); } /* "glove/metrics/accuracy_cython.pyx":17 * result += x[i] * y[i] * 
* return result # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_result; goto __pyx_L0; /* "glove/metrics/accuracy_cython.pyx":7 * * * cdef double dot(double[::1] x, # <<<<<<<<<<<<<< * double[::1] y, * int dim) nogil: */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "glove/metrics/accuracy_cython.pyx":20 * * * def compute_rank_violations(double[:, ::1] wordvec, # <<<<<<<<<<<<<< * double[::1] wordvec_norm, * double[:, ::1] input, */ /* Python wrapper */ static PyObject *__pyx_pw_5glove_7metrics_15accuracy_cython_1compute_rank_violations(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_5glove_7metrics_15accuracy_cython_compute_rank_violations[] = "\n Compute the rank violations\n of the expected words in the word analogy task.\n "; static PyMethodDef __pyx_mdef_5glove_7metrics_15accuracy_cython_1compute_rank_violations = {"compute_rank_violations", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5glove_7metrics_15accuracy_cython_1compute_rank_violations, METH_VARARGS|METH_KEYWORDS, __pyx_doc_5glove_7metrics_15accuracy_cython_compute_rank_violations}; static PyObject *__pyx_pw_5glove_7metrics_15accuracy_cython_1compute_rank_violations(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_wordvec = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_wordvec_norm = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_input = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_expected = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_inputs = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_rank_violations = { 0, 0, { 0 }, { 0 }, { 0 } }; CYTHON_UNUSED int __pyx_v_no_threads; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("compute_rank_violations (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_wordvec,&__pyx_n_s_wordvec_norm,&__pyx_n_s_input,&__pyx_n_s_expected,&__pyx_n_s_inputs,&__pyx_n_s_rank_violations,&__pyx_n_s_no_threads,0}; PyObject* values[7] = {0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); CYTHON_FALLTHROUGH; case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); CYTHON_FALLTHROUGH; case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_wordvec)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_wordvec_norm)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("compute_rank_violations", 1, 7, 7, 1); __PYX_ERR(0, 20, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_input)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("compute_rank_violations", 1, 7, 7, 2); __PYX_ERR(0, 20, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: 
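      /* Argument unpacking: the fall-through switches above and below first copy
         any positional arguments into values[0..6], then look up each slot that
         is still empty (here: expected, inputs, rank_violations, no_threads) in
         __pyx_kwds; __Pyx_RaiseArgtupleInvalid reports the first of the seven
         required arguments that is missing. */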
if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_expected)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("compute_rank_violations", 1, 7, 7, 3); __PYX_ERR(0, 20, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 4: if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_inputs)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("compute_rank_violations", 1, 7, 7, 4); __PYX_ERR(0, 20, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 5: if (likely((values[5] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_rank_violations)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("compute_rank_violations", 1, 7, 7, 5); __PYX_ERR(0, 20, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 6: if (likely((values[6] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_no_threads)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("compute_rank_violations", 1, 7, 7, 6); __PYX_ERR(0, 20, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "compute_rank_violations") < 0)) __PYX_ERR(0, 20, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 7) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); } __pyx_v_wordvec = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_wordvec.memview)) __PYX_ERR(0, 20, __pyx_L3_error) __pyx_v_wordvec_norm = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_wordvec_norm.memview)) __PYX_ERR(0, 21, __pyx_L3_error) __pyx_v_input = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_input.memview)) __PYX_ERR(0, 22, __pyx_L3_error) __pyx_v_expected = __Pyx_PyObject_to_MemoryviewSlice_ds_int(values[3], PyBUF_WRITABLE); if (unlikely(!__pyx_v_expected.memview)) __PYX_ERR(0, 23, __pyx_L3_error) __pyx_v_inputs = __Pyx_PyObject_to_MemoryviewSlice_d_dc_int(values[4], PyBUF_WRITABLE); if (unlikely(!__pyx_v_inputs.memview)) __PYX_ERR(0, 24, __pyx_L3_error) __pyx_v_rank_violations = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[5], PyBUF_WRITABLE); if (unlikely(!__pyx_v_rank_violations.memview)) __PYX_ERR(0, 25, __pyx_L3_error) __pyx_v_no_threads = __Pyx_PyInt_As_int(values[6]); if (unlikely((__pyx_v_no_threads == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 26, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("compute_rank_violations", 1, 7, 7, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 20, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("glove.metrics.accuracy_cython.compute_rank_violations", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5glove_7metrics_15accuracy_cython_compute_rank_violations(__pyx_self, __pyx_v_wordvec, __pyx_v_wordvec_norm, __pyx_v_input, __pyx_v_expected, __pyx_v_inputs, __pyx_v_rank_violations, __pyx_v_no_threads); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5glove_7metrics_15accuracy_cython_compute_rank_violations(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_wordvec, __Pyx_memviewslice 
__pyx_v_wordvec_norm, __Pyx_memviewslice __pyx_v_input, __Pyx_memviewslice __pyx_v_expected, __Pyx_memviewslice __pyx_v_inputs, __Pyx_memviewslice __pyx_v_rank_violations, CYTHON_UNUSED int __pyx_v_no_threads) {
  int __pyx_v_i;
  int __pyx_v_j;
  int __pyx_v_k;
  CYTHON_UNUSED int __pyx_v_no_input_vectors;
  int __pyx_v_no_wordvec;
  int __pyx_v_skip_word;
  int __pyx_v_no_components;
  int __pyx_v_violations;
  double __pyx_v_score_of_expected;
  double __pyx_v_score;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  __Pyx_memviewslice __pyx_t_4 = { 0, 0, { 0 }, { 0 }, { 0 } };
  Py_ssize_t __pyx_t_5;
  __Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } };
  Py_ssize_t __pyx_t_7;
  int __pyx_t_8;
  int __pyx_t_9;
  int __pyx_t_10;
  int __pyx_t_11;
  int __pyx_t_12;
  __Pyx_RefNannySetupContext("compute_rank_violations", 0);

  /* "glove/metrics/accuracy_cython.pyx":37
 * cdef double score_of_expected, score
 *
 * no_input_vectors = input.shape[0]             # <<<<<<<<<<<<<<
 * no_wordvec = wordvec.shape[0]
 * no_components = wordvec.shape[1]
 */
  __pyx_v_no_input_vectors = (__pyx_v_input.shape[0]);

  /* "glove/metrics/accuracy_cython.pyx":38
 *
 * no_input_vectors = input.shape[0]
 * no_wordvec = wordvec.shape[0]             # <<<<<<<<<<<<<<
 * no_components = wordvec.shape[1]
 *
 */
  __pyx_v_no_wordvec = (__pyx_v_wordvec.shape[0]);

  /* "glove/metrics/accuracy_cython.pyx":39
 * no_input_vectors = input.shape[0]
 * no_wordvec = wordvec.shape[0]
 * no_components = wordvec.shape[1]             # <<<<<<<<<<<<<<
 *
 * with nogil:
 */
  __pyx_v_no_components = (__pyx_v_wordvec.shape[1]);

  /* "glove/metrics/accuracy_cython.pyx":41
 * no_components = wordvec.shape[1]
 *
 * with nogil:             # <<<<<<<<<<<<<<
 * for i in prange(no_input_vectors, num_threads=no_threads,
 * schedule='dynamic'):
 */
  {
      #ifdef WITH_THREAD
      PyThreadState *_save;
      Py_UNBLOCK_THREADS
      __Pyx_FastGIL_Remember();
      #endif
      /*try:*/ {

        /* "glove/metrics/accuracy_cython.pyx":42
 *
 * with nogil:
 * for i in prange(no_input_vectors, num_threads=no_threads,             # <<<<<<<<<<<<<<
 * schedule='dynamic'):
 *
 */
        __pyx_t_1 = __pyx_v_no_input_vectors;
        if ((1 == 0)) abort();
        {
            #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
                #undef likely
                #undef unlikely
                #define likely(x) (x)
                #define unlikely(x) (x)
            #endif
            __pyx_t_3 = (__pyx_t_1 - 0 + 1 - 1/abs(1)) / 1;
            if (__pyx_t_3 > 0)
            {
                #ifdef _OPENMP
                #pragma omp parallel num_threads(__pyx_v_no_threads) private(__pyx_t_10, __pyx_t_11, __pyx_t_12, __pyx_t_5, __pyx_t_7, __pyx_t_8, __pyx_t_9) firstprivate(__pyx_t_4, __pyx_t_6)
                #endif /* _OPENMP */
                {
                    #ifdef _OPENMP
                    #pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j) lastprivate(__pyx_v_k) lastprivate(__pyx_v_score) lastprivate(__pyx_v_score_of_expected) lastprivate(__pyx_v_skip_word) lastprivate(__pyx_v_violations) schedule(dynamic)
                    #endif /* _OPENMP */
                    for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_3; __pyx_t_2++){
                        {
                            __pyx_v_i = (int)(0 + 1 * __pyx_t_2);
                            /* Initialize private variables to invalid values */
                            __pyx_v_j = ((int)0xbad0bad0);
                            __pyx_v_k = ((int)0xbad0bad0);
                            __pyx_v_score = ((double)__PYX_NAN());
                            __pyx_v_score_of_expected = ((double)__PYX_NAN());
                            __pyx_v_skip_word = ((int)0xbad0bad0);
                            __pyx_v_violations = ((int)0xbad0bad0);

                            /* "glove/metrics/accuracy_cython.pyx":46
 *
 * # Compute the score of the expected word.
* score_of_expected = (dot(input[i], # <<<<<<<<<<<<<< * wordvec[expected[i]], * no_components) */ __pyx_t_4.data = __pyx_v_input.data; __pyx_t_4.memview = __pyx_v_input.memview; __PYX_INC_MEMVIEW(&__pyx_t_4, 0); { Py_ssize_t __pyx_tmp_idx = __pyx_v_i; Py_ssize_t __pyx_tmp_stride = __pyx_v_input.strides[0]; __pyx_t_4.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_4.shape[0] = __pyx_v_input.shape[1]; __pyx_t_4.strides[0] = __pyx_v_input.strides[1]; __pyx_t_4.suboffsets[0] = -1; __pyx_t_5 = __pyx_v_i; /* "glove/metrics/accuracy_cython.pyx":47 * # Compute the score of the expected word. * score_of_expected = (dot(input[i], * wordvec[expected[i]], # <<<<<<<<<<<<<< * no_components) * / wordvec_norm[expected[i]]) */ __pyx_t_6.data = __pyx_v_wordvec.data; __pyx_t_6.memview = __pyx_v_wordvec.memview; __PYX_INC_MEMVIEW(&__pyx_t_6, 0); { Py_ssize_t __pyx_tmp_idx = (*((int *) ( /* dim=0 */ (__pyx_v_expected.data + __pyx_t_5 * __pyx_v_expected.strides[0]) ))); Py_ssize_t __pyx_tmp_stride = __pyx_v_wordvec.strides[0]; __pyx_t_6.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_6.shape[0] = __pyx_v_wordvec.shape[1]; __pyx_t_6.strides[0] = __pyx_v_wordvec.strides[1]; __pyx_t_6.suboffsets[0] = -1; __pyx_t_5 = __pyx_v_i; /* "glove/metrics/accuracy_cython.pyx":49 * wordvec[expected[i]], * no_components) * / wordvec_norm[expected[i]]) # <<<<<<<<<<<<<< * * # Compute all other scores and count */ __pyx_t_7 = (*((int *) ( /* dim=0 */ (__pyx_v_expected.data + __pyx_t_5 * __pyx_v_expected.strides[0]) ))); __pyx_v_score_of_expected = (__pyx_f_5glove_7metrics_15accuracy_cython_dot(__pyx_t_4, __pyx_t_6, __pyx_v_no_components) / (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_wordvec_norm.data) + __pyx_t_7)) )))); __PYX_XDEC_MEMVIEW(&__pyx_t_4, 0); __pyx_t_4.memview = NULL; __pyx_t_4.data = NULL; __PYX_XDEC_MEMVIEW(&__pyx_t_6, 0); __pyx_t_6.memview = NULL; __pyx_t_6.data = NULL; /* "glove/metrics/accuracy_cython.pyx":53 * # Compute all other scores and count * # rank violations. * violations = 0 # <<<<<<<<<<<<<< * * for j in range(no_wordvec): */ __pyx_v_violations = 0; /* "glove/metrics/accuracy_cython.pyx":55 * violations = 0 * * for j in range(no_wordvec): # <<<<<<<<<<<<<< * * # Words from the input do not */ __pyx_t_8 = __pyx_v_no_wordvec; __pyx_t_9 = __pyx_t_8; for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) { __pyx_v_j = __pyx_t_10; /* "glove/metrics/accuracy_cython.pyx":59 * # Words from the input do not * # count as violations. * skip_word = 0 # <<<<<<<<<<<<<< * for k in range(4): * if inputs[i, k] == j: */ __pyx_v_skip_word = 0; /* "glove/metrics/accuracy_cython.pyx":60 * # count as violations. 
 * skip_word = 0
 * for k in range(4):             # <<<<<<<<<<<<<<
 * if inputs[i, k] == j:
 * skip_word = 1
 */
                              for (__pyx_t_11 = 0; __pyx_t_11 < 4; __pyx_t_11+=1) {
                                __pyx_v_k = __pyx_t_11;

                                /* "glove/metrics/accuracy_cython.pyx":61
 * skip_word = 0
 * for k in range(4):
 * if inputs[i, k] == j:             # <<<<<<<<<<<<<<
 * skip_word = 1
 * break
 */
                                __pyx_t_5 = __pyx_v_i;
                                __pyx_t_7 = __pyx_v_k;
                                __pyx_t_12 = (((*((int *) ( /* dim=1 */ ((char *) (((int *) ( /* dim=0 */ (__pyx_v_inputs.data + __pyx_t_5 * __pyx_v_inputs.strides[0]) )) + __pyx_t_7)) ))) == __pyx_v_j) != 0);
                                if (__pyx_t_12) {

                                  /* "glove/metrics/accuracy_cython.pyx":62
 * for k in range(4):
 * if inputs[i, k] == j:
 * skip_word = 1             # <<<<<<<<<<<<<<
 * break
 *
 */
                                  __pyx_v_skip_word = 1;

                                  /* "glove/metrics/accuracy_cython.pyx":63
 * if inputs[i, k] == j:
 * skip_word = 1
 * break             # <<<<<<<<<<<<<<
 *
 * if skip_word == 1:
 */
                                  goto __pyx_L13_break;

                                  /* "glove/metrics/accuracy_cython.pyx":61
 * skip_word = 0
 * for k in range(4):
 * if inputs[i, k] == j:             # <<<<<<<<<<<<<<
 * skip_word = 1
 * break
 */
                                }
                              }
                              __pyx_L13_break:;

                              /* "glove/metrics/accuracy_cython.pyx":65
 * break
 *
 * if skip_word == 1:             # <<<<<<<<<<<<<<
 * continue
 *
 */
                              __pyx_t_12 = ((__pyx_v_skip_word == 1) != 0);
                              if (__pyx_t_12) {

                                /* "glove/metrics/accuracy_cython.pyx":66
 *
 * if skip_word == 1:
 * continue             # <<<<<<<<<<<<<<
 *
 * score = (dot(input[i],
 */
                                goto __pyx_L10_continue;

                                /* "glove/metrics/accuracy_cython.pyx":65
 * break
 *
 * if skip_word == 1:             # <<<<<<<<<<<<<<
 * continue
 *
 */
                              }

                              /* "glove/metrics/accuracy_cython.pyx":68
 * continue
 *
 * score = (dot(input[i],             # <<<<<<<<<<<<<<
 * wordvec[j],
 * no_components)
 */
                              __pyx_t_6.data = __pyx_v_input.data;
                              __pyx_t_6.memview = __pyx_v_input.memview;
                              __PYX_INC_MEMVIEW(&__pyx_t_6, 0);
                              {
    Py_ssize_t __pyx_tmp_idx = __pyx_v_i;
    Py_ssize_t __pyx_tmp_stride = __pyx_v_input.strides[0];
        __pyx_t_6.data += __pyx_tmp_idx * __pyx_tmp_stride;
}

__pyx_t_6.shape[0] = __pyx_v_input.shape[1];
__pyx_t_6.strides[0] = __pyx_v_input.strides[1];
    __pyx_t_6.suboffsets[0] = -1;

__pyx_t_4.data = __pyx_v_wordvec.data;

                              /* "glove/metrics/accuracy_cython.pyx":69
 *
 * score = (dot(input[i],
 * wordvec[j],             # <<<<<<<<<<<<<<
 * no_components)
 * / wordvec_norm[j])
 */
                              __pyx_t_4.memview = __pyx_v_wordvec.memview;
                              __PYX_INC_MEMVIEW(&__pyx_t_4, 0);
                              {
    Py_ssize_t __pyx_tmp_idx = __pyx_v_j;
    Py_ssize_t __pyx_tmp_stride = __pyx_v_wordvec.strides[0];
        __pyx_t_4.data += __pyx_tmp_idx * __pyx_tmp_stride;
}

__pyx_t_4.shape[0] = __pyx_v_wordvec.shape[1];
__pyx_t_4.strides[0] = __pyx_v_wordvec.strides[1];
    __pyx_t_4.suboffsets[0] = -1;

__pyx_t_7 = __pyx_v_j;

                              /* "glove/metrics/accuracy_cython.pyx":71
 * wordvec[j],
 * no_components)
 * / wordvec_norm[j])             # <<<<<<<<<<<<<<
 *
 * if score >= score_of_expected:
 */
                              __pyx_v_score = (__pyx_f_5glove_7metrics_15accuracy_cython_dot(__pyx_t_6, __pyx_t_4, __pyx_v_no_components) / (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_wordvec_norm.data) + __pyx_t_7)) ))));
                              __PYX_XDEC_MEMVIEW(&__pyx_t_6, 0);
                              __pyx_t_6.memview = NULL;
                              __pyx_t_6.data = NULL;
                              __PYX_XDEC_MEMVIEW(&__pyx_t_4, 0);
                              __pyx_t_4.memview = NULL;
                              __pyx_t_4.data = NULL;

                              /* "glove/metrics/accuracy_cython.pyx":73
 * / wordvec_norm[j])
 *
 * if score >= score_of_expected:             # <<<<<<<<<<<<<<
 * violations = violations + 1
 *
 */
                              __pyx_t_12 = ((__pyx_v_score >= __pyx_v_score_of_expected) != 0);
                              if (__pyx_t_12) {

                                /* "glove/metrics/accuracy_cython.pyx":74
 *
 * if score >= score_of_expected:
 * violations = violations + 1             # <<<<<<<<<<<<<<
 *
 * # Update the average rank with the rank
 */
                                __pyx_v_violations = (__pyx_v_violations + 1);

                                /* "glove/metrics/accuracy_cython.pyx":73
 * / wordvec_norm[j])
 *
 * if score >= score_of_expected:             # <<<<<<<<<<<<<<
 * violations = violations + 1
 *
 */
                              }
                              __pyx_L10_continue:;
                            }

                            /* "glove/metrics/accuracy_cython.pyx":78
 * # Update the average rank with the rank
 * # of this example.
 * rank_violations[i] = violations             # <<<<<<<<<<<<<<
 */
                            __pyx_t_7 = __pyx_v_i;
                            *((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_rank_violations.data) + __pyx_t_7)) )) = __pyx_v_violations;
                        }
                    }
                }
            }
        }
        #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
            #undef likely
            #undef unlikely
            #define likely(x) __builtin_expect(!!(x), 1)
            #define unlikely(x) __builtin_expect(!!(x), 0)
        #endif
      }

      /* "glove/metrics/accuracy_cython.pyx":41
 * no_components = wordvec.shape[1]
 *
 * with nogil:             # <<<<<<<<<<<<<<
 * for i in prange(no_input_vectors, num_threads=no_threads,
 * schedule='dynamic'):
 */
      /*finally:*/ {
        /*normal exit:*/{
          #ifdef WITH_THREAD
          __Pyx_FastGIL_Forget();
          Py_BLOCK_THREADS
          #endif
          goto __pyx_L5;
        }
        __pyx_L5:;
      }
  }

  /* "glove/metrics/accuracy_cython.pyx":20
 *
 *
 * def compute_rank_violations(double[:, ::1] wordvec,             # <<<<<<<<<<<<<<
 * double[::1] wordvec_norm,
 * double[:, ::1] input,
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  __PYX_XDEC_MEMVIEW(&__pyx_v_wordvec, 1);
  __PYX_XDEC_MEMVIEW(&__pyx_v_wordvec_norm, 1);
  __PYX_XDEC_MEMVIEW(&__pyx_v_input, 1);
  __PYX_XDEC_MEMVIEW(&__pyx_v_expected, 1);
  __PYX_XDEC_MEMVIEW(&__pyx_v_inputs, 1);
  __PYX_XDEC_MEMVIEW(&__pyx_v_rank_violations, 1);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":122
 * cdef bint dtype_is_object
 *
 * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None,             # <<<<<<<<<<<<<<
 * mode="c", bint allocate_buffer=True):
 *
 */

/* Python wrapper */
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_shape = 0;
  Py_ssize_t __pyx_v_itemsize;
  PyObject *__pyx_v_format = 0;
  PyObject *__pyx_v_mode = 0;
  int __pyx_v_allocate_buffer;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
  {
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0};
    PyObject* values[5] = {0,0,0,0,0};
    values[3] = ((PyObject *)__pyx_n_s_c);
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      switch (pos_args) {
        case  5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
        CYTHON_FALLTHROUGH;
        case  4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
        CYTHON_FALLTHROUGH;
        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        CYTHON_FALLTHROUGH;
        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        CYTHON_FALLTHROUGH;
        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      switch (pos_args) {
        case  0:
        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
        CYTHON_FALLTHROUGH;
        case  1:
        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 122,
__pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 122, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode); if (value) { values[3] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 122, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_shape = ((PyObject*)values[0]); __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 122, __pyx_L3_error) __pyx_v_format = values[2]; __pyx_v_mode = values[3]; if (values[4]) { __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 123, __pyx_L3_error) } else { /* "View.MemoryView":123 * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<< * * cdef int idx */ __pyx_v_allocate_buffer = ((int)1); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 122, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 122, __pyx_L1_error) if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 122, __pyx_L1_error) } __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer); /* "View.MemoryView":122 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) { int __pyx_v_idx; Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_dim; PyObject **__pyx_v_p; char __pyx_v_order; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; char 
*__pyx_t_7; int __pyx_t_8; Py_ssize_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; Py_ssize_t __pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__cinit__", 0); __Pyx_INCREF(__pyx_v_format); /* "View.MemoryView":129 * cdef PyObject **p * * self.ndim = <int> len(shape) # <<<<<<<<<<<<<< * self.itemsize = itemsize * */ if (unlikely(__pyx_v_shape == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(1, 129, __pyx_L1_error) } __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 129, __pyx_L1_error) __pyx_v_self->ndim = ((int)__pyx_t_1); /* "View.MemoryView":130 * * self.ndim = <int> len(shape) * self.itemsize = itemsize # <<<<<<<<<<<<<< * * if not self.ndim: */ __pyx_v_self->itemsize = __pyx_v_itemsize; /* "View.MemoryView":132 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":133 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 133, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 133, __pyx_L1_error) /* "View.MemoryView":132 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ } /* "View.MemoryView":135 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ __pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":136 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 136, __pyx_L1_error) /* "View.MemoryView":135 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ } /* "View.MemoryView":138 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ __pyx_t_2 = PyBytes_Check(__pyx_v_format); __pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":139 * * if not isinstance(format, bytes): * format = format.encode('ASCII') # <<<<<<<<<<<<<< * self._format = format # keep a reference to the byte string * self.format = self._format */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_3 = (__pyx_t_6) ? 
__Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_n_s_ASCII) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_n_s_ASCII); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":138 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ } /* "View.MemoryView":140 * if not isinstance(format, bytes): * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<< * self.format = self._format * */ if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 140, __pyx_L1_error) __pyx_t_3 = __pyx_v_format; __Pyx_INCREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __Pyx_GOTREF(__pyx_v_self->_format); __Pyx_DECREF(__pyx_v_self->_format); __pyx_v_self->_format = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":141 * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string * self.format = self._format # <<<<<<<<<<<<<< * * */ if (unlikely(__pyx_v_self->_format == Py_None)) { PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found"); __PYX_ERR(1, 141, __pyx_L1_error) } __pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(1, 141, __pyx_L1_error) __pyx_v_self->format = __pyx_t_7; /* "View.MemoryView":144 * * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<< * self._strides = self._shape + self.ndim * */ __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2))); /* "View.MemoryView":145 * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<< * * if not self._shape: */ __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim); /* "View.MemoryView":147 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ __pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":148 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 148, __pyx_L1_error) /* "View.MemoryView":147 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ } /* "View.MemoryView":151 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
 % (idx, dim))
 */
  __pyx_t_8 = 0;
  __pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0;
  for (;;) {
    if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
    #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
    __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 151, __pyx_L1_error)
    #else
    __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 151, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    #endif
    __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 151, __pyx_L1_error)
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_v_dim = __pyx_t_9;
    __pyx_v_idx = __pyx_t_8;
    __pyx_t_8 = (__pyx_t_8 + 1);

    /* "View.MemoryView":152
 *
 * for idx, dim in enumerate(shape):
 * if dim <= 0:             # <<<<<<<<<<<<<<
 * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
 * self._shape[idx] = dim
 */
    __pyx_t_4 = ((__pyx_v_dim <= 0) != 0);
    if (unlikely(__pyx_t_4)) {

      /* "View.MemoryView":153
 * for idx, dim in enumerate(shape):
 * if dim <= 0:
 * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))             # <<<<<<<<<<<<<<
 * self._shape[idx] = dim
 *
 */
      __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 153, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_5);
      __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_6);
      __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_10);
      __Pyx_GIVEREF(__pyx_t_5);
      PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5);
      __Pyx_GIVEREF(__pyx_t_6);
      PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6);
      __pyx_t_5 = 0;
      __pyx_t_6 = 0;
      __pyx_t_6 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_6);
      __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
      __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_10);
      __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
      __Pyx_Raise(__pyx_t_10, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
      __PYX_ERR(1, 153, __pyx_L1_error)

      /* "View.MemoryView":152
 *
 * for idx, dim in enumerate(shape):
 * if dim <= 0:             # <<<<<<<<<<<<<<
 * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
 * self._shape[idx] = dim
 */
    }

    /* "View.MemoryView":154
 * if dim <= 0:
 * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
 * self._shape[idx] = dim             # <<<<<<<<<<<<<<
 *
 * cdef char order
 */
    (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim;

    /* "View.MemoryView":151
 *
 *
 * for idx, dim in enumerate(shape):             # <<<<<<<<<<<<<<
 * if dim <= 0:
 * raise ValueError("Invalid shape in axis %d: %d."
% (idx, dim)) */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":157 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 157, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":158 * cdef char order * if mode == 'fortran': * order = b'F' # <<<<<<<<<<<<<< * self.mode = u'fortran' * elif mode == 'c': */ __pyx_v_order = 'F'; /* "View.MemoryView":159 * if mode == 'fortran': * order = b'F' * self.mode = u'fortran' # <<<<<<<<<<<<<< * elif mode == 'c': * order = b'C' */ __Pyx_INCREF(__pyx_n_u_fortran); __Pyx_GIVEREF(__pyx_n_u_fortran); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_fortran; /* "View.MemoryView":157 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ goto __pyx_L10; } /* "View.MemoryView":160 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 160, __pyx_L1_error) if (likely(__pyx_t_4)) { /* "View.MemoryView":161 * self.mode = u'fortran' * elif mode == 'c': * order = b'C' # <<<<<<<<<<<<<< * self.mode = u'c' * else: */ __pyx_v_order = 'C'; /* "View.MemoryView":162 * elif mode == 'c': * order = b'C' * self.mode = u'c' # <<<<<<<<<<<<<< * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) */ __Pyx_INCREF(__pyx_n_u_c); __Pyx_GIVEREF(__pyx_n_u_c); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_c; /* "View.MemoryView":160 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ goto __pyx_L10; } /* "View.MemoryView":164 * self.mode = u'c' * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<< * * self.len = fill_contig_strides_array(self._shape, self._strides, */ /*else*/ { __pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 164, __pyx_L1_error) } __pyx_L10:; /* "View.MemoryView":166 * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) * * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<< * itemsize, self.ndim, order) * */ __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order); /* "View.MemoryView":169 * itemsize, self.ndim, order) * * self.free_data = allocate_buffer # <<<<<<<<<<<<<< * self.dtype_is_object = format == b'O' * if allocate_buffer: */ __pyx_v_self->free_data = __pyx_v_allocate_buffer; /* "View.MemoryView":170 * * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<< * if allocate_buffer: * */ __pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 
170, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 170, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_v_self->dtype_is_object = __pyx_t_4; /* "View.MemoryView":171 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ __pyx_t_4 = (__pyx_v_allocate_buffer != 0); if (__pyx_t_4) { /* "View.MemoryView":174 * * * self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<< * if not self.data: * raise MemoryError("unable to allocate array data.") */ __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len)); /* "View.MemoryView":175 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ __pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":176 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 176, __pyx_L1_error) /* "View.MemoryView":175 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ } /* "View.MemoryView":178 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ __pyx_t_4 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_4) { /* "View.MemoryView":179 * * if self.dtype_is_object: * p = <PyObject **> self.data # <<<<<<<<<<<<<< * for i in range(self.len / itemsize): * p[i] = Py_None */ __pyx_v_p = ((PyObject **)__pyx_v_self->data); /* "View.MemoryView":180 * if self.dtype_is_object: * p = <PyObject **> self.data * for i in range(self.len / itemsize): # <<<<<<<<<<<<<< * p[i] = Py_None * Py_INCREF(Py_None) */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 180, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 180, __pyx_L1_error) } __pyx_t_1 = (__pyx_v_self->len / __pyx_v_itemsize); __pyx_t_9 = __pyx_t_1; for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_9; __pyx_t_11+=1) { __pyx_v_i = __pyx_t_11; /* "View.MemoryView":181 * p = <PyObject **> self.data * for i in range(self.len / itemsize): * p[i] = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ (__pyx_v_p[__pyx_v_i]) = Py_None; /* "View.MemoryView":182 * for i in range(self.len / itemsize): * p[i] = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * @cname('getbuffer') */ Py_INCREF(Py_None); } /* "View.MemoryView":178 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ } /* "View.MemoryView":171 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":122 * cdef bint 
dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_format); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":185 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_bufmode; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; char *__pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_t_6; Py_ssize_t *__pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; if (__pyx_v_info == NULL) { PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); return -1; } __Pyx_RefNannySetupContext("__getbuffer__", 0); __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); /* "View.MemoryView":186 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 # <<<<<<<<<<<<<< * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = -1; /* "View.MemoryView":187 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 187, __pyx_L1_error) __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":188 * cdef int bufmode = -1 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":187 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ goto __pyx_L3; } /* "View.MemoryView":189 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); 
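  /* Buffer-protocol negotiation: bufmode holds the contiguity bits this array
     can promise (C- or Fortran-contiguous, depending on self.mode); the
     `flags & bufmode` test below rejects any PyObject_GetBuffer() request
     whose flags share none of those bits. */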
if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 189, __pyx_L1_error) __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":190 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") */ __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":189 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ } __pyx_L3:; /* "View.MemoryView":191 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ __pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":192 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 192, __pyx_L1_error) /* "View.MemoryView":191 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ } /* "View.MemoryView":193 * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data # <<<<<<<<<<<<<< * info.len = self.len * info.ndim = self.ndim */ __pyx_t_4 = __pyx_v_self->data; __pyx_v_info->buf = __pyx_t_4; /* "View.MemoryView":194 * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data * info.len = self.len # <<<<<<<<<<<<<< * info.ndim = self.ndim * info.shape = self._shape */ __pyx_t_5 = __pyx_v_self->len; __pyx_v_info->len = __pyx_t_5; /* "View.MemoryView":195 * info.buf = self.data * info.len = self.len * info.ndim = self.ndim # <<<<<<<<<<<<<< * info.shape = self._shape * info.strides = self._strides */ __pyx_t_6 = __pyx_v_self->ndim; __pyx_v_info->ndim = __pyx_t_6; /* "View.MemoryView":196 * info.len = self.len * info.ndim = self.ndim * info.shape = self._shape # <<<<<<<<<<<<<< * info.strides = self._strides * info.suboffsets = NULL */ __pyx_t_7 = __pyx_v_self->_shape; __pyx_v_info->shape = __pyx_t_7; /* "View.MemoryView":197 * info.ndim = self.ndim * info.shape = self._shape * info.strides = self._strides # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = self.itemsize */ __pyx_t_7 = __pyx_v_self->_strides; __pyx_v_info->strides = __pyx_t_7; /* "View.MemoryView":198 * info.shape = self._shape * info.strides = self._strides * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = self.itemsize * info.readonly = 0 */ __pyx_v_info->suboffsets = NULL; /* "View.MemoryView":199 * info.strides = self._strides * info.suboffsets = NULL * info.itemsize = self.itemsize # <<<<<<<<<<<<<< * info.readonly = 0 * */ __pyx_t_5 = __pyx_v_self->itemsize; 
__pyx_v_info->itemsize = __pyx_t_5; /* "View.MemoryView":200 * info.suboffsets = NULL * info.itemsize = self.itemsize * info.readonly = 0 # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ __pyx_v_info->readonly = 0; /* "View.MemoryView":202 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":203 * * if flags & PyBUF_FORMAT: * info.format = self.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_4 = __pyx_v_self->format; __pyx_v_info->format = __pyx_t_4; /* "View.MemoryView":202 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ goto __pyx_L5; } /* "View.MemoryView":205 * info.format = self.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.obj = self */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L5:; /* "View.MemoryView":207 * info.format = NULL * * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":185 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info->obj == Py_None) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":211 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* Python wrapper */ static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_array___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":212 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":213 * def __dealloc__(array self): * if self.callback_free_data != NULL: * self.callback_free_data(self.data) # <<<<<<<<<<<<<< * elif self.free_data: * if self.dtype_is_object: */ __pyx_v_self->callback_free_data(__pyx_v_self->data); /* "View.MemoryView":212 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif 
self.free_data: */ goto __pyx_L3; } /* "View.MemoryView":214 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ __pyx_t_1 = (__pyx_v_self->free_data != 0); if (__pyx_t_1) { /* "View.MemoryView":215 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":216 * elif self.free_data: * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<< * self._strides, self.ndim, False) * free(self.data) */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0); /* "View.MemoryView":215 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ } /* "View.MemoryView":218 * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) * free(self.data) # <<<<<<<<<<<<<< * PyObject_Free(self._shape) * */ free(__pyx_v_self->data); /* "View.MemoryView":214 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ } __pyx_L3:; /* "View.MemoryView":219 * self._strides, self.ndim, False) * free(self.data) * PyObject_Free(self._shape) # <<<<<<<<<<<<<< * * @property */ PyObject_Free(__pyx_v_self->_shape); /* "View.MemoryView":211 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":222 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":223 * @property * def memview(self): * return self.get_memview() # <<<<<<<<<<<<<< * * @cname('get_memview') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 223, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":222 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* function exit code */ 
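  /* Error/exit labels: every failing call in memview.__get__ above jumps to
     __pyx_L1_error so the temporary __pyx_t_1 is released exactly once and a
     traceback entry is recorded before NULL is returned. */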
__pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":226 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) { int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_memview", 0); /* "View.MemoryView":227 * @cname('get_memview') * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<< * return memoryview(self, flags, self.dtype_is_object) * */ __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE); /* "View.MemoryView":228 * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":226 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":230 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* Python wrapper */ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) { 
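  /* __len__ exposes the first dimension of the array. A minimal usage sketch
   * in Cython (an assumption for illustration, not part of this file):
   *
   *     from cython.view cimport array
   *     cdef array a = array(shape=(480, 640), itemsize=sizeof(double), format="d")
   *     assert len(a) == 480    # returns self._shape[0], as implemented below
   */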
Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":231 * * def __len__(self): * return self._shape[0] # <<<<<<<<<<<<<< * * def __getattr__(self, attr): */ __pyx_r = (__pyx_v_self->_shape[0]); goto __pyx_L0; /* "View.MemoryView":230 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":233 * return self._shape[0] * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* Python wrapper */ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getattr__", 0); /* "View.MemoryView":234 * * def __getattr__(self, attr): * return getattr(self.memview, attr) # <<<<<<<<<<<<<< * * def __getitem__(self, item): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 234, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 234, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":233 * return self._shape[0] * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":236 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* Python wrapper */ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = 
NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":237 * * def __getitem__(self, item): * return self.memview[item] # <<<<<<<<<<<<<< * * def __setitem__(self, item, value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 237, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 237, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":236 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":239 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* Python wrapper */ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setitem__", 0); /* "View.MemoryView":240 * * def __setitem__(self, item, value): * self.memview[item] = value # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 240, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":239 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations 
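/* The __Pyx_RefNanny* calls that bracket every function here are
 * Cython's optional reference-count tracer; unless the module is
 * compiled with CYTHON_REFNANNY defined, these macros expand to
 * nothing and cost nothing at runtime. */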
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ 
__pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":244 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) { struct __pyx_array_obj *__pyx_v_result = 0; struct __pyx_array_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("array_cwrapper", 0); /* "View.MemoryView":248 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ __pyx_t_1 = ((__pyx_v_buf == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":249 * * if buf == NULL: * result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<< * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4); __pyx_t_2 = 0; __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":248 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ goto __pyx_L3; } /* "View.MemoryView":251 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ /*else*/ { __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 251, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_3 = 0; /* "View.MemoryView":252 * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) # <<<<<<<<<<<<<< * result.data = buf * */ __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 252, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 252, __pyx_L1_error) /* "View.MemoryView":251 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":253 * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) * result.data = buf # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->data = __pyx_v_buf; } __pyx_L3:; /* "View.MemoryView":255 * result.data = buf * * return result # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":244 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":281 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* Python wrapper */ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_name = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0}; PyObject* values[1] = {0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { if 
(unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 281, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); } __pyx_v_name = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 281, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__", 0); /* "View.MemoryView":282 * cdef object name * def __init__(self, name): * self.name = name # <<<<<<<<<<<<<< * def __repr__(self): * return self.name */ __Pyx_INCREF(__pyx_v_name); __Pyx_GIVEREF(__pyx_v_name); __Pyx_GOTREF(__pyx_v_self->name); __Pyx_DECREF(__pyx_v_self->name); __pyx_v_self->name = __pyx_v_name; /* "View.MemoryView":281 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* function exit code */ __pyx_r = 0; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":283 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* Python wrapper */ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":284 * self.name = name * def __repr__(self): * return self.name # <<<<<<<<<<<<<< * * cdef generic = Enum("<strided and direct or indirect>") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->name); __pyx_r = __pyx_v_self->name; goto __pyx_L0; /* "View.MemoryView":283 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef tuple state * cdef object _dict */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = 
__pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_v_state = 0; PyObject *__pyx_v__dict = 0; int __pyx_v_use_setstate; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":5 * cdef object _dict * cdef bint use_setstate * state = (self.name,) # <<<<<<<<<<<<<< * _dict = getattr(self, '__dict__', None) * if _dict is not None: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_self->name); __Pyx_GIVEREF(__pyx_v_self->name); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name); __pyx_v_state = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":6 * cdef bint use_setstate * state = (self.name,) * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<< * if _dict is not None: * state += (_dict,) */ __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v__dict = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":7 * state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ __pyx_t_2 = (__pyx_v__dict != Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "(tree fragment)":8 * _dict = getattr(self, '__dict__', None) * if _dict is not None: * state += (_dict,) # <<<<<<<<<<<<<< * use_setstate = True * else: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v__dict); __Pyx_GIVEREF(__pyx_v__dict); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict); __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4)); __pyx_t_4 = 0; /* "(tree fragment)":9 * if _dict is not None: * state += (_dict,) * use_setstate = True # <<<<<<<<<<<<<< * else: * use_setstate = self.name is not None */ __pyx_v_use_setstate = 1; /* "(tree fragment)":7 * state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ goto __pyx_L3; } /* "(tree fragment)":11 * use_setstate = True * else: * use_setstate = self.name is not None # <<<<<<<<<<<<<< * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state */ /*else*/ { __pyx_t_3 = (__pyx_v_self->name != Py_None); __pyx_v_use_setstate = __pyx_t_3; } __pyx_L3:; /* "(tree fragment)":12 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ __pyx_t_3 = (__pyx_v_use_setstate != 0); if (__pyx_t_3) { /* "(tree fragment)":13 * use_setstate = self.name is not None * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<< * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) */ 
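/* The reduce tuple assembled below follows pickle's standard
 * (callable, args[, state]) shape: __pyx_unpickle_Enum is the
 * reconstructor, and 0xb068931 (decimal 184977713, interned above as
 * __pyx_int_184977713) is a checksum of the type's field layout, which
 * the generated unpickler checks before restoring state. */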
__Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None); __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state); __pyx_t_4 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "(tree fragment)":12 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ } /* "(tree fragment)":15 * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1); __pyx_t_5 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef tuple state * cdef object _dict */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_state); __Pyx_XDECREF(__pyx_v__dict); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":16 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ 
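/* For reference, the two return shapes chosen by __reduce_cython__
 * above, rebuilt with the plain C API (hypothetical demo helper,
 * excluded from the build with #if 0): with setstate the state tuple
 * travels as a third element, otherwise it rides inside the
 * reconstructor's argument tuple.
 */
#if 0
static PyObject *reduce_tuple_demo(PyObject *unpickler, PyObject *cls,
                                   PyObject *state, int use_setstate)
{
    if (use_setstate)
        /* (unpickler, (cls, 184977713, None), state) */
        return Py_BuildValue("(O(OiO)O)", unpickler, cls, 184977713,
                             Py_None, state);
    /* (unpickler, (cls, 184977713, state)) */
    return Py_BuildValue("(O(OiO))", unpickler, cls, 184977713, state);
}
#endif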
static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":17 * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<< */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error) __pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":16 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":298 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) { Py_intptr_t __pyx_v_aligned_p; size_t __pyx_v_offset; void *__pyx_r; int __pyx_t_1; /* "View.MemoryView":300 * cdef void *align_pointer(void *memory, size_t alignment) nogil: * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<< * cdef size_t offset * */ __pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory); /* "View.MemoryView":304 * * with cython.cdivision(True): * offset = aligned_p % alignment # <<<<<<<<<<<<<< * * if offset > 0: */ __pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment); /* "View.MemoryView":306 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ __pyx_t_1 = ((__pyx_v_offset > 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":307 * * if offset > 0: * aligned_p += alignment - offset # <<<<<<<<<<<<<< * * return <void *> aligned_p */ __pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset)); /* "View.MemoryView":306 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ } /* "View.MemoryView":309 * aligned_p += alignment - offset * * return <void *> aligned_p # <<<<<<<<<<<<<< * * */ __pyx_r = 
((void *)__pyx_v_aligned_p); goto __pyx_L0; /* "View.MemoryView":298 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":345 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* Python wrapper */ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_obj = 0; int __pyx_v_flags; int __pyx_v_dtype_is_object; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 345, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 345, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_obj = values[0]; __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) if (values[2]) { __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) } else { __pyx_v_dtype_is_object = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 345, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, 
__pyx_v_dtype_is_object); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__cinit__", 0); /* "View.MemoryView":346 * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj # <<<<<<<<<<<<<< * self.flags = flags * if type(self) is memoryview or obj is not None: */ __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); __Pyx_GOTREF(__pyx_v_self->obj); __Pyx_DECREF(__pyx_v_self->obj); __pyx_v_self->obj = __pyx_v_obj; /* "View.MemoryView":347 * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj * self.flags = flags # <<<<<<<<<<<<<< * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) */ __pyx_v_self->flags = __pyx_v_flags; /* "View.MemoryView":348 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type)); __pyx_t_3 = (__pyx_t_2 != 0); if (!__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } __pyx_t_3 = (__pyx_v_obj != Py_None); __pyx_t_2 = (__pyx_t_3 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "View.MemoryView":349 * self.flags = flags * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<< * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None */ __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 349, __pyx_L1_error) /* "View.MemoryView":350 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":351 * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None; /* "View.MemoryView":352 * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * global __pyx_memoryview_thread_locks_used */ Py_INCREF(Py_None); /* "View.MemoryView":350 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ } /* "View.MemoryView":348 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ } /* "View.MemoryView":355 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < 
THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ __pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0); if (__pyx_t_1) { /* "View.MemoryView":356 * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: */ __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); /* "View.MemoryView":357 * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<< * if self.lock is NULL: * self.lock = PyThread_allocate_lock() */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1); /* "View.MemoryView":355 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ } /* "View.MemoryView":358 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":359 * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<< * if self.lock is NULL: * raise MemoryError */ __pyx_v_self->lock = PyThread_allocate_lock(); /* "View.MemoryView":360 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":361 * self.lock = PyThread_allocate_lock() * if self.lock is NULL: * raise MemoryError # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ PyErr_NoMemory(); __PYX_ERR(1, 361, __pyx_L1_error) /* "View.MemoryView":360 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ } /* "View.MemoryView":358 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ } /* "View.MemoryView":363 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":364 * * if flags & PyBUF_FORMAT: * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<< * else: * self.dtype_is_object = dtype_is_object */ __pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L11_bool_binop_done; } __pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_self->dtype_is_object = __pyx_t_1; /* "View.MemoryView":363 * raise MemoryError * * if flags & 
PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ goto __pyx_L10; } /* "View.MemoryView":366 * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<< * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( */ /*else*/ { __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object; } __pyx_L10:; /* "View.MemoryView":368 * self.dtype_is_object = dtype_is_object * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<< * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL */ __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int)))); /* "View.MemoryView":370 * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL # <<<<<<<<<<<<<< * * def __dealloc__(memoryview self): */ __pyx_v_self->typeinfo = NULL; /* "View.MemoryView":345 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":372 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* Python wrapper */ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { int __pyx_v_i; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyThread_type_lock __pyx_t_6; PyThread_type_lock __pyx_t_7; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":373 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: */ __pyx_t_1 = (__pyx_v_self->obj != Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":374 * def __dealloc__(memoryview self): * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< * elif (<__pyx_buffer *> &self.view).obj == Py_None: * */ __Pyx_ReleaseBuffer((&__pyx_v_self->view)); /* "View.MemoryView":373 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: */ goto __pyx_L3; } /* "View.MemoryView":375 * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< * * (<__pyx_buffer *> &self.view).obj = NULL */ __pyx_t_2 = ((((Py_buffer 
*)(&__pyx_v_self->view))->obj == Py_None) != 0); if (__pyx_t_2) { /* "View.MemoryView":377 * elif (<__pyx_buffer *> &self.view).obj == Py_None: * * (<__pyx_buffer *> &self.view).obj = NULL # <<<<<<<<<<<<<< * Py_DECREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_self->view))->obj = NULL; /* "View.MemoryView":378 * * (<__pyx_buffer *> &self.view).obj = NULL * Py_DECREF(Py_None) # <<<<<<<<<<<<<< * * cdef int i */ Py_DECREF(Py_None); /* "View.MemoryView":375 * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< * * (<__pyx_buffer *> &self.view).obj = NULL */ } __pyx_L3:; /* "View.MemoryView":382 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":383 * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<< * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 */ __pyx_t_3 = __pyx_memoryview_thread_locks_used; __pyx_t_4 = __pyx_t_3; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":384 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ __pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0); if (__pyx_t_2) { /* "View.MemoryView":385 * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<< * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1); /* "View.MemoryView":386 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ __pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0); if (__pyx_t_2) { /* "View.MemoryView":388 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<< * break * else: */ __pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); __pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]); /* "View.MemoryView":387 * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break */ (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6; 
(__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7; /* "View.MemoryView":386 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ } /* "View.MemoryView":389 * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break # <<<<<<<<<<<<<< * else: * PyThread_free_lock(self.lock) */ goto __pyx_L6_break; /* "View.MemoryView":384 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ } } /*else*/ { /* "View.MemoryView":391 * break * else: * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< * * cdef char *get_item_pointer(memoryview self, object index) except NULL: */ PyThread_free_lock(__pyx_v_self->lock); } __pyx_L6_break:; /* "View.MemoryView":382 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ } /* "View.MemoryView":372 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":393 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { Py_ssize_t __pyx_v_dim; char *__pyx_v_itemp; PyObject *__pyx_v_idx = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t __pyx_t_3; PyObject *(*__pyx_t_4)(PyObject *); PyObject *__pyx_t_5 = NULL; Py_ssize_t __pyx_t_6; char *__pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_item_pointer", 0); /* "View.MemoryView":395 * cdef char *get_item_pointer(memoryview self, object index) except NULL: * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<< * * for dim, idx in enumerate(index): */ __pyx_v_itemp = ((char *)__pyx_v_self->view.buf); /* "View.MemoryView":397 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ __pyx_t_1 = 0; if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) { __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; __pyx_t_4 = NULL; } else { __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 397, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_4)) { if (likely(PyList_CheckExact(__pyx_t_2))) { if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) 
break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } else { if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } } else { __pyx_t_5 = __pyx_t_4(__pyx_t_2); if (unlikely(!__pyx_t_5)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 397, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_5); } __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_1; __pyx_t_1 = (__pyx_t_1 + 1); /* "View.MemoryView":398 * * for dim, idx in enumerate(index): * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<< * * return itemp */ __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 398, __pyx_L1_error) __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 398, __pyx_L1_error) __pyx_v_itemp = __pyx_t_7; /* "View.MemoryView":397 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":400 * itemp = pybuffer_index(&self.view, itemp, idx, dim) * * return itemp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_itemp; goto __pyx_L0; /* "View.MemoryView":393 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_idx); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":403 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* Python wrapper */ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_v_have_slices = NULL; PyObject 
*__pyx_v_indices = NULL; char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; char *__pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":404 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":405 * def __getitem__(memoryview self, object index): * if index is Ellipsis: * return self # <<<<<<<<<<<<<< * * have_slices, indices = _unellipsify(index, self.view.ndim) */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __pyx_r = ((PyObject *)__pyx_v_self); goto __pyx_L0; /* "View.MemoryView":404 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ } /* "View.MemoryView":407 * return self * * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * cdef char *itemp */ __pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (likely(__pyx_t_3 != Py_None)) { PyObject* sequence = __pyx_t_3; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 407, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); #else __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 407, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_4; __pyx_t_4 = 0; __pyx_v_indices = __pyx_t_5; __pyx_t_5 = 0; /* "View.MemoryView":410 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 410, __pyx_L1_error) if (__pyx_t_2) { /* "View.MemoryView":411 * cdef char *itemp * if have_slices: * return memview_slice(self, indices) # <<<<<<<<<<<<<< * else: * itemp = self.get_item_pointer(indices) */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 411, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":410 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ } /* "View.MemoryView":413 * return memview_slice(self, indices) * else: * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<< * return self.convert_item_to_object(itemp) * */ /*else*/ { __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(1, 413, __pyx_L1_error) __pyx_v_itemp = __pyx_t_6; /* 
"View.MemoryView":414 * else: * itemp = self.get_item_pointer(indices) * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<< * * def __setitem__(memoryview self, object index, object value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 414, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":403 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_indices); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":416 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") */ /* Python wrapper */ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_obj = NULL; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setitem__", 0); __Pyx_INCREF(__pyx_v_index); /* "View.MemoryView":417 * * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: # <<<<<<<<<<<<<< * raise TypeError("Cannot assign to read-only memoryview") * */ __pyx_t_1 = (__pyx_v_self->view.readonly != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":418 * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< * * have_slices, index = _unellipsify(index, self.view.ndim) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 418, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 418, __pyx_L1_error) /* "View.MemoryView":417 * * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: # <<<<<<<<<<<<<< * raise TypeError("Cannot assign to read-only memoryview") * */ } /* "View.MemoryView":420 * raise TypeError("Cannot assign to read-only memoryview") * * have_slices, index = 
_unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * if have_slices: */ __pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (likely(__pyx_t_2 != Py_None)) { PyObject* sequence = __pyx_t_2; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 420, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 420, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_3; __pyx_t_3 = 0; __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":422 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 422, __pyx_L1_error) if (__pyx_t_1) { /* "View.MemoryView":423 * * if have_slices: * obj = self.is_slice(value) # <<<<<<<<<<<<<< * if obj: * self.setitem_slice_assignment(self[index], obj) */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 423, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_obj = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":424 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 424, __pyx_L1_error) if (__pyx_t_1) { /* "View.MemoryView":425 * obj = self.is_slice(value) * if obj: * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<< * else: * self.setitem_slice_assign_scalar(self[index], value) */ __pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":424 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ goto __pyx_L5; } /* "View.MemoryView":427 * self.setitem_slice_assignment(self[index], obj) * else: * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<< * else: * self.setitem_indexed(index, value) */ /*else*/ { __pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 427, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(1, 427, __pyx_L1_error) 
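/* __setitem__ dispatch (note on the quoted Cython source): after the readonly
 * check and _unellipsify(), slice assignment is split on whether the value
 * itself exposes a buffer:
 *     mv[:, 0] = other_view  -> setitem_slice_assignment    (element-wise copy)
 *     mv[:, 0] = 42          -> setitem_slice_assign_scalar (broadcast one item)
 *     mv[1, 2] = 42          -> setitem_indexed             (single element)
 */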
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 427, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L5:; /* "View.MemoryView":422 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ goto __pyx_L4; } /* "View.MemoryView":429 * self.setitem_slice_assign_scalar(self[index], value) * else: * self.setitem_indexed(index, value) # <<<<<<<<<<<<<< * * cdef is_slice(self, obj): */ /*else*/ { __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 429, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L4:; /* "View.MemoryView":416 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_obj); __Pyx_XDECREF(__pyx_v_index); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":431 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_slice", 0); __Pyx_INCREF(__pyx_v_obj); /* "View.MemoryView":432 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":433 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "View.MemoryView":434 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":435 * try: * obj = memoryview(obj, self.flags & 
~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) # <<<<<<<<<<<<<< * except TypeError: * return None */ __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 435, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); /* "View.MemoryView":434 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":433 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L9_try_end; __pyx_L4_error:; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; /* "View.MemoryView":436 * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) * except TypeError: # <<<<<<<<<<<<<< * return None * */ __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); if (__pyx_t_9) { __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 436, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GOTREF(__pyx_t_8); __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":437 * self.dtype_is_object) * except TypeError: * return None # <<<<<<<<<<<<<< * * return obj */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L7_except_return; } goto __pyx_L6_except_error; __pyx_L6_except_error:; /* "View.MemoryView":433 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L7_except_return:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L0; __pyx_L9_try_end:; } /* "View.MemoryView":432 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, */ } /* "View.MemoryView":439 * return None * * return obj # <<<<<<<<<<<<<< * * cdef setitem_slice_assignment(self, dst, src): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_obj); __pyx_r = __pyx_v_obj; goto 
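/* is_slice (note on the quoted Cython source): any value that is not already a
 * memoryview is coerced through the buffer protocol with the writable bit
 * stripped, roughly:
 *     obj = memoryview(obj, (flags & ~PyBUF_WRITABLE) | PyBUF_ANY_CONTIGUOUS,
 *                      dtype_is_object)
 * and a TypeError from that constructor means "not a buffer", reported as None.
 */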
__pyx_L0; /* "View.MemoryView":431 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_obj); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":441 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) { __Pyx_memviewslice __pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_src_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; __Pyx_memviewslice *__pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setitem_slice_assignment", 0); /* "View.MemoryView":445 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 445, __pyx_L1_error) __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 445, __pyx_L1_error) /* "View.MemoryView":446 * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<< * src.ndim, dst.ndim, self.dtype_is_object) * */ if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 446, __pyx_L1_error) __pyx_t_2 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice)); if (unlikely(__pyx_t_2 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 446, __pyx_L1_error) /* "View.MemoryView":447 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<< * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":445 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, 
dst.ndim, self.dtype_is_object) */ __pyx_t_6 = __pyx_memoryview_copy_contents((__pyx_t_1[0]), (__pyx_t_2[0]), __pyx_t_4, __pyx_t_5, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 445, __pyx_L1_error) /* "View.MemoryView":441 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":449 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) { int __pyx_v_array[0x80]; void *__pyx_v_tmp; void *__pyx_v_item; __Pyx_memviewslice *__pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_tmp_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; char const *__pyx_t_6; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; PyObject *__pyx_t_12 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0); /* "View.MemoryView":451 * cdef setitem_slice_assign_scalar(self, memoryview dst, value): * cdef int array[128] * cdef void *tmp = NULL # <<<<<<<<<<<<<< * cdef void *item * */ __pyx_v_tmp = NULL; /* "View.MemoryView":456 * cdef __Pyx_memviewslice *dst_slice * cdef __Pyx_memviewslice tmp_slice * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<< * * if <size_t>self.view.itemsize > sizeof(array): */ __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 456, __pyx_L1_error) __pyx_v_dst_slice = __pyx_t_1; /* "View.MemoryView":458 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ __pyx_t_2 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0); if (__pyx_t_2) { /* "View.MemoryView":459 * * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<< * if tmp == NULL: * raise MemoryError */ __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize); /* "View.MemoryView":460 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ __pyx_t_2 = ((__pyx_v_tmp == NULL) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":461 * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: * raise MemoryError # <<<<<<<<<<<<<< * item = tmp * else: */ PyErr_NoMemory(); __PYX_ERR(1, 461, __pyx_L1_error) /* "View.MemoryView":460 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * 
raise MemoryError * item = tmp */ } /* "View.MemoryView":462 * if tmp == NULL: * raise MemoryError * item = tmp # <<<<<<<<<<<<<< * else: * item = <void *> array */ __pyx_v_item = __pyx_v_tmp; /* "View.MemoryView":458 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ goto __pyx_L3; } /* "View.MemoryView":464 * item = tmp * else: * item = <void *> array # <<<<<<<<<<<<<< * * try: */ /*else*/ { __pyx_v_item = ((void *)__pyx_v_array); } __pyx_L3:; /* "View.MemoryView":466 * item = <void *> array * * try: # <<<<<<<<<<<<<< * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value */ /*try:*/ { /* "View.MemoryView":467 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ __pyx_t_2 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_2) { /* "View.MemoryView":468 * try: * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<< * else: * self.assign_item_from_object(<char *> item, value) */ (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); /* "View.MemoryView":467 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ goto __pyx_L8; } /* "View.MemoryView":470 * (<PyObject **> item)[0] = <PyObject *> value * else: * self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<< * * */ /*else*/ { __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 470, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L8:; /* "View.MemoryView":474 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ __pyx_t_2 = ((__pyx_v_self->view.suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":475 * * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<< * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, * item, self.dtype_is_object) */ __pyx_t_3 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 475, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":474 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ } /* "View.MemoryView":476 * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<< * item, self.dtype_is_object) * finally: */ __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object); } /* "View.MemoryView":479 * item, self.dtype_is_object) * finally: * PyMem_Free(tmp) # <<<<<<<<<<<<<< * * cdef setitem_indexed(self, index, value): */ /*finally:*/ { /*normal exit:*/{ PyMem_Free(__pyx_v_tmp); goto __pyx_L7; } __pyx_L6_error:; /*exception exit:*/{ __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __pyx_t_7 = 0; __pyx_t_8 = 0; 
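/* try/finally lowering (note, standard Cython codegen): the live exception is
 * fetched into temporaries so the finally body -- PyMem_Free(tmp) -- always
 * runs, after which the exception and the line/file bookkeeping are restored
 * and control jumps to the error label.
 */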
__pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9); __Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_9); __Pyx_XGOTREF(__pyx_t_10); __Pyx_XGOTREF(__pyx_t_11); __Pyx_XGOTREF(__pyx_t_12); __pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename; { PyMem_Free(__pyx_v_tmp); } if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_XGIVEREF(__pyx_t_12); __Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12); } __Pyx_XGIVEREF(__pyx_t_7); __Pyx_XGIVEREF(__pyx_t_8); __Pyx_XGIVEREF(__pyx_t_9); __Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9); __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6; goto __pyx_L1_error; } __pyx_L7:; } /* "View.MemoryView":449 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":481 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations char *__pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setitem_indexed", 0); /* "View.MemoryView":482 * * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<< * self.assign_item_from_object(itemp, value) * */ __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(1, 482, __pyx_L1_error) __pyx_v_itemp = __pyx_t_1; /* "View.MemoryView":483 * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":481 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); 
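/* Error convention (note): these cdef helpers return a new PyObject* reference
 * (Py_None on success), so failure is signalled CPython-style by recording a
 * traceback frame and returning __pyx_r = 0, i.e. NULL.
 */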
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":485 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_v_struct = NULL; PyObject *__pyx_v_bytesitem = 0; PyObject *__pyx_v_result = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_t_8; PyObject *__pyx_t_9 = NULL; size_t __pyx_t_10; int __pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":488 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef bytes bytesitem * */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 488, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":491 * cdef bytes bytesitem * * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<< * try: * result = struct.unpack(self.view.format, bytesitem) */ __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 491, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_bytesitem = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":492 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "View.MemoryView":493 * bytesitem = itemp[:self.view.itemsize] * try: * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<< * except struct.error: * raise ValueError("Unable to convert item to object") */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else 
#endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; } __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6); __Pyx_INCREF(__pyx_v_bytesitem); __Pyx_GIVEREF(__pyx_v_bytesitem); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem); __pyx_t_6 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":492 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ } /* "View.MemoryView":497 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ /*else:*/ { __pyx_t_10 = strlen(__pyx_v_self->view.format); __pyx_t_11 = ((__pyx_t_10 == 1) != 0); if (__pyx_t_11) { /* "View.MemoryView":498 * else: * if len(self.view.format) == 1: * return result[0] # <<<<<<<<<<<<<< * return result * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 498, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L6_except_return; /* "View.MemoryView":497 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ } /* "View.MemoryView":499 * if len(self.view.format) == 1: * return result[0] * return result # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_result); __pyx_r = __pyx_v_result; goto __pyx_L6_except_return; } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "View.MemoryView":494 * try: * result = struct.unpack(self.view.format, bytesitem) * except struct.error: # <<<<<<<<<<<<<< * raise ValueError("Unable to convert item to object") * else: */ __Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 494, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9); __pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0; if (__pyx_t_8) { __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(1, 494, __pyx_L5_except_error) 
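/* convert_item_to_object (illustrative sketch of the quoted Cython source):
 * the element's bytes are decoded with the struct module, roughly
 *     result = struct.unpack(self.view.format, itemp[:itemsize])
 * struct.error is mapped to ValueError here, and a single-character format
 * later unwraps the 1-tuple so that mv[i] yields the scalar itself.
 */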
__Pyx_GOTREF(__pyx_t_9); __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_1); /* "View.MemoryView":495 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 495, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(1, 495, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "View.MemoryView":492 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L6_except_return:; __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; } /* "View.MemoryView":485 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesitem); __Pyx_XDECREF(__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":501 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_v_struct = NULL; char __pyx_v_c; PyObject *__pyx_v_bytesvalue = 0; Py_ssize_t __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; Py_ssize_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; char *__pyx_t_11; char *__pyx_t_12; char *__pyx_t_13; char *__pyx_t_14; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":504 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef char c * cdef bytes bytesvalue */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 504, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":509 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ __pyx_t_2 = PyTuple_Check(__pyx_v_value); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "View.MemoryView":510 * * if isinstance(value, tuple): 
* bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<< * else: * bytesvalue = struct.pack(self.view.format, value) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 510, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":509 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ goto __pyx_L3; } /* "View.MemoryView":512 * bytesvalue = struct.pack(self.view.format, *value) * else: * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<< * * for i, c in enumerate(bytesvalue): */ /*else*/ { __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = NULL; __pyx_t_7 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); __pyx_t_7 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_GIVEREF(__pyx_t_1); 
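/* assign_item_from_object (illustrative sketch of the quoted Cython source):
 * the value is serialised with the struct module and copied byte by byte:
 *     bytesvalue = struct.pack(self.view.format, *value)  # tuple case above
 *     bytesvalue = struct.pack(self.view.format, value)   # scalar case here
 *     for i, c in enumerate(bytesvalue): itemp[i] = c
 */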
PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1); __Pyx_INCREF(__pyx_v_value); __Pyx_GIVEREF(__pyx_v_value); PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 512, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "View.MemoryView":514 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = 0; if (unlikely(__pyx_v_bytesvalue == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable"); __PYX_ERR(1, 514, __pyx_L1_error) } __Pyx_INCREF(__pyx_v_bytesvalue); __pyx_t_10 = __pyx_v_bytesvalue; __pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10); __pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10)); for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) { __pyx_t_11 = __pyx_t_14; __pyx_v_c = (__pyx_t_11[0]); /* "View.MemoryView":515 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ __pyx_v_i = __pyx_t_9; /* "View.MemoryView":514 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = (__pyx_t_9 + 1); /* "View.MemoryView":515 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c; } __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; /* "View.MemoryView":501 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesvalue); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":518 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int 
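/* __getbuffer__ (behavioural note): re-exports the wrapped Py_buffer under the
 * requested flags -- shape, strides, suboffsets and format pointers are only
 * handed out when PyBUF_ND, PyBUF_STRIDES, PyBUF_INDIRECT and PyBUF_FORMAT are
 * set, respectively -- and a PyBUF_WRITABLE request on a read-only view is
 * rejected with ValueError before anything is filled in.
 */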
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; char *__pyx_t_5; void *__pyx_t_6; int __pyx_t_7; Py_ssize_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; if (__pyx_v_info == NULL) { PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); return -1; } __Pyx_RefNannySetupContext("__getbuffer__", 0); __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); /* "View.MemoryView":519 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< * raise ValueError("Cannot create writable memory view from read-only memoryview") * */ __pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = (__pyx_v_self->view.readonly != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (unlikely(__pyx_t_1)) { /* "View.MemoryView":520 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< * * if flags & PyBUF_ND: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 520, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 520, __pyx_L1_error) /* "View.MemoryView":519 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< * raise ValueError("Cannot create writable memory view from read-only memoryview") * */ } /* "View.MemoryView":522 * raise ValueError("Cannot create writable memory view from read-only memoryview") * * if flags & PyBUF_ND: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0); if (__pyx_t_1) { /* "View.MemoryView":523 * * if flags & PyBUF_ND: * info.shape = self.view.shape # <<<<<<<<<<<<<< * else: * info.shape = NULL */ __pyx_t_4 = __pyx_v_self->view.shape; __pyx_v_info->shape = __pyx_t_4; /* "View.MemoryView":522 * raise ValueError("Cannot create writable memory view from read-only memoryview") * * if flags & PyBUF_ND: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ goto __pyx_L6; } /* "View.MemoryView":525 * info.shape = self.view.shape * else: * info.shape = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_STRIDES: */ /*else*/ { __pyx_v_info->shape = NULL; } __pyx_L6:; /* "View.MemoryView":527 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); if (__pyx_t_1) { /* "View.MemoryView":528 * * if flags & PyBUF_STRIDES: * info.strides = self.view.strides # <<<<<<<<<<<<<< * else: * info.strides = NULL */ __pyx_t_4 = __pyx_v_self->view.strides; __pyx_v_info->strides = __pyx_t_4; /* "View.MemoryView":527 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ goto __pyx_L7; } /* "View.MemoryView":530 * info.strides = self.view.strides * else: * info.strides = NULL # <<<<<<<<<<<<<< * 
* if flags & PyBUF_INDIRECT: */ /*else*/ { __pyx_v_info->strides = NULL; } __pyx_L7:; /* "View.MemoryView":532 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0); if (__pyx_t_1) { /* "View.MemoryView":533 * * if flags & PyBUF_INDIRECT: * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<< * else: * info.suboffsets = NULL */ __pyx_t_4 = __pyx_v_self->view.suboffsets; __pyx_v_info->suboffsets = __pyx_t_4; /* "View.MemoryView":532 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ goto __pyx_L8; } /* "View.MemoryView":535 * info.suboffsets = self.view.suboffsets * else: * info.suboffsets = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ /*else*/ { __pyx_v_info->suboffsets = NULL; } __pyx_L8:; /* "View.MemoryView":537 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":538 * * if flags & PyBUF_FORMAT: * info.format = self.view.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_5 = __pyx_v_self->view.format; __pyx_v_info->format = __pyx_t_5; /* "View.MemoryView":537 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ goto __pyx_L9; } /* "View.MemoryView":540 * info.format = self.view.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.buf = self.view.buf */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L9:; /* "View.MemoryView":542 * info.format = NULL * * info.buf = self.view.buf # <<<<<<<<<<<<<< * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize */ __pyx_t_6 = __pyx_v_self->view.buf; __pyx_v_info->buf = __pyx_t_6; /* "View.MemoryView":543 * * info.buf = self.view.buf * info.ndim = self.view.ndim # <<<<<<<<<<<<<< * info.itemsize = self.view.itemsize * info.len = self.view.len */ __pyx_t_7 = __pyx_v_self->view.ndim; __pyx_v_info->ndim = __pyx_t_7; /* "View.MemoryView":544 * info.buf = self.view.buf * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<< * info.len = self.view.len * info.readonly = self.view.readonly */ __pyx_t_8 = __pyx_v_self->view.itemsize; __pyx_v_info->itemsize = __pyx_t_8; /* "View.MemoryView":545 * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize * info.len = self.view.len # <<<<<<<<<<<<<< * info.readonly = self.view.readonly * info.obj = self */ __pyx_t_8 = __pyx_v_self->view.len; __pyx_v_info->len = __pyx_t_8; /* "View.MemoryView":546 * info.itemsize = self.view.itemsize * info.len = self.view.len * info.readonly = self.view.readonly # <<<<<<<<<<<<<< * info.obj = self * */ __pyx_t_1 = __pyx_v_self->view.readonly; __pyx_v_info->readonly = __pyx_t_1; /* "View.MemoryView":547 * info.len = self.view.len * info.readonly = self.view.readonly * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":518 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable 
memory view from read-only memoryview") */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info->obj == Py_None) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":553 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":554 * @property * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<< * transpose_memslice(&result.from_slice) * return result */ __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 554, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 554, __pyx_L1_error) __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":555 * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<< * return result * */ __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 555, __pyx_L1_error) /* "View.MemoryView":556 * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) * return result # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":553 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":559 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static 
PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":560 * @property * def base(self): * return self.obj # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->obj); __pyx_r = __pyx_v_self->obj; goto __pyx_L0; /* "View.MemoryView":559 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":563 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_length; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":564 * @property * def shape(self): * return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 564, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { __pyx_t_2 = __pyx_t_4; __pyx_v_length = (__pyx_t_2[0]); __pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 564, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":563 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; 
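/* shape/strides/suboffsets properties (note): each getter materialises the
 * first view.ndim entries of the corresponding Py_ssize_t array as a tuple,
 * e.g. tuple([length for length in self.view.shape[:self.view.ndim]]);
 * strides raises ValueError when the exporter supplied no stride information,
 * and suboffsets substitutes (-1,) * ndim when absent.
 */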
__Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":567 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_stride; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":568 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ __pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":570 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 570, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 570, __pyx_L1_error) /* "View.MemoryView":568 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ } /* "View.MemoryView":572 * raise ValueError("Buffer view does not expose strides") * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_v_stride = (__pyx_t_3[0]); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "View.MemoryView":567 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return 
__pyx_r; } /* "View.MemoryView":575 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; Py_ssize_t *__pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":576 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":577 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_tuple__12, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":576 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ } /* "View.MemoryView":579 * return (-1,) * self.view.ndim * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim); for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) { __pyx_t_4 = __pyx_t_6; __pyx_v_suboffset = (__pyx_t_4[0]); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":575 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, 
__pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":582 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":583 * @property * def ndim(self): * return self.view.ndim # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 583, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":582 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":586 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":587 * @property * def itemsize(self): * return self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 587, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":586 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":590 * * @property * def 
nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":591 * @property * def nbytes(self): * return self.size * self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 591, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 591, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 591, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":590 * * @property * def nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":594 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_v_result = NULL; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":595 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ __pyx_t_1 = (__pyx_v_self->_size == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":596 * def 
size(self): * if self._size is None: * result = 1 # <<<<<<<<<<<<<< * * for length in self.view.shape[:self.view.ndim]: */ __Pyx_INCREF(__pyx_int_1); __pyx_v_result = __pyx_int_1; /* "View.MemoryView":598 * result = 1 * * for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<< * result *= length * */ __pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 598, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6); __pyx_t_6 = 0; /* "View.MemoryView":599 * * for length in self.view.shape[:self.view.ndim]: * result *= length # <<<<<<<<<<<<<< * * self._size = result */ __pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 599, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6); __pyx_t_6 = 0; } /* "View.MemoryView":601 * result *= length * * self._size = result # <<<<<<<<<<<<<< * * return self._size */ __Pyx_INCREF(__pyx_v_result); __Pyx_GIVEREF(__pyx_v_result); __Pyx_GOTREF(__pyx_v_self->_size); __Pyx_DECREF(__pyx_v_self->_size); __pyx_v_self->_size = __pyx_v_result; /* "View.MemoryView":595 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ } /* "View.MemoryView":603 * self._size = result * * return self._size # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->_size); __pyx_r = __pyx_v_self->_size; goto __pyx_L0; /* "View.MemoryView":594 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":605 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* Python wrapper */ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":606 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ __pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":607 * def __len__(self): * if self.view.ndim >= 1: * return self.view.shape[0] # <<<<<<<<<<<<<< * * return 0 */ __pyx_r = (__pyx_v_self->view.shape[0]); goto __pyx_L0; /* "View.MemoryView":606 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ } /* "View.MemoryView":609 * return self.view.shape[0] * * return 0 # <<<<<<<<<<<<<< * * def 
__repr__(self): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":605 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":611 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* Python wrapper */ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":612 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":613 * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) # <<<<<<<<<<<<<< * * def __str__(self): */ __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 613, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); /* "View.MemoryView":612 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":611 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return 
__pyx_r; } /* "View.MemoryView":615 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* Python wrapper */ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__str__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__str__", 0); /* "View.MemoryView":616 * * def __str__(self): * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":615 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":619 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; 
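/* is_c_contig: obtain a __Pyx_memviewslice for this view (Cython's
   get_slice_from_memview typically reuses the slice embedded in a
   _memoryviewslice and otherwise copies the view into the local tmp),
   then test whether its strides describe a C-contiguous layout over all
   view.ndim dimensions. */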
__Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_c_contig", 0); /* "View.MemoryView":622 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'C', self.view.ndim) * */ __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 622, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":623 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<< * * def is_f_contig(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 623, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":619 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":625 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_f_contig", 0); /* "View.MemoryView":628 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'F', self.view.ndim) * */ __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 628, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":629 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<< * * def copy(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 629, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); 
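/* 'F' order: Fortran contiguity, i.e. the first axis varies fastest,
   so the stride of dimension 0 equals the itemsize and each subsequent
   stride is the running product of the preceding extents. */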
__pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":625 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":631 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_mslice; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("copy", 0); /* "View.MemoryView":633 * def copy(self): * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &mslice) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS)); /* "View.MemoryView":635 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS * * slice_copy(self, &mslice) # <<<<<<<<<<<<<< * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice)); /* "View.MemoryView":636 * * slice_copy(self, &mslice) * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_C_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 636, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":641 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<< * * def copy_fortran(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 641, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":631 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":643 * 
return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("copy_fortran", 0); /* "View.MemoryView":645 * def copy_fortran(self): * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &src) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); /* "View.MemoryView":647 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS * * slice_copy(self, &src) # <<<<<<<<<<<<<< * dst = slice_copy_contig(&src, "fortran", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src)); /* "View.MemoryView":648 * * slice_copy(self, &src) * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_F_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 648, __pyx_L1_error) __pyx_v_dst = __pyx_t_1; /* "View.MemoryView":653 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 653, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":643 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations 
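/* A memoryview wraps a C-level Py_buffer acquired in __cinit__, which
   cannot be re-created from pickled state, so the generated
   __reduce_cython__/__setstate_cython__ pair unconditionally raises
   TypeError rather than attempting serialization. */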
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to 
non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":657 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { struct __pyx_memoryview_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_cwrapper", 0); /* "View.MemoryView":658 * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< * result.typeinfo = typeinfo * return result */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_o); __Pyx_GIVEREF(__pyx_v_o); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":659 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo # <<<<<<<<<<<<<< * return result * */ __pyx_v_result->typeinfo = __pyx_v_typeinfo; /* "View.MemoryView":660 * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_check') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":657 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":663 * * 
@cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("memoryview_check", 0); /* "View.MemoryView":664 * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): * return isinstance(o, memoryview) # <<<<<<<<<<<<<< * * cdef tuple _unellipsify(object index, int ndim): */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type); __pyx_r = __pyx_t_1; goto __pyx_L0; /* "View.MemoryView":663 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":666 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { PyObject *__pyx_v_tup = NULL; PyObject *__pyx_v_result = NULL; int __pyx_v_have_slices; int __pyx_v_seen_ellipsis; CYTHON_UNUSED PyObject *__pyx_v_idx = NULL; PyObject *__pyx_v_item = NULL; Py_ssize_t __pyx_v_nslices; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; Py_ssize_t __pyx_t_5; PyObject *(*__pyx_t_6)(PyObject *); PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; int __pyx_t_9; int __pyx_t_10; PyObject *__pyx_t_11 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_unellipsify", 0); /* "View.MemoryView":671 * full slices. * """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ __pyx_t_1 = PyTuple_Check(__pyx_v_index); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":672 * """ * if not isinstance(index, tuple): * tup = (index,) # <<<<<<<<<<<<<< * else: * tup = index */ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 672, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_index); __Pyx_GIVEREF(__pyx_v_index); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index); __pyx_v_tup = __pyx_t_3; __pyx_t_3 = 0; /* "View.MemoryView":671 * full slices. 
* """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ goto __pyx_L3; } /* "View.MemoryView":674 * tup = (index,) * else: * tup = index # <<<<<<<<<<<<<< * * result = [] */ /*else*/ { __Pyx_INCREF(__pyx_v_index); __pyx_v_tup = __pyx_v_index; } __pyx_L3:; /* "View.MemoryView":676 * tup = index * * result = [] # <<<<<<<<<<<<<< * have_slices = False * seen_ellipsis = False */ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 676, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_result = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":677 * * result = [] * have_slices = False # <<<<<<<<<<<<<< * seen_ellipsis = False * for idx, item in enumerate(tup): */ __pyx_v_have_slices = 0; /* "View.MemoryView":678 * result = [] * have_slices = False * seen_ellipsis = False # <<<<<<<<<<<<<< * for idx, item in enumerate(tup): * if item is Ellipsis: */ __pyx_v_seen_ellipsis = 0; /* "View.MemoryView":679 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ __Pyx_INCREF(__pyx_int_0); __pyx_t_3 = __pyx_int_0; if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) { __pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; __pyx_t_6 = NULL; } else { __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 679, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_6)) { if (likely(PyList_CheckExact(__pyx_t_4))) { if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } else { if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } } else { __pyx_t_7 = __pyx_t_6(__pyx_t_4); if (unlikely(!__pyx_t_7)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 679, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_7); } __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7); __pyx_t_7 = 0; __Pyx_INCREF(__pyx_t_3); __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3); __pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = __pyx_t_7; __pyx_t_7 = 0; /* "View.MemoryView":680 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":681 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not 
seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":682 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(1, 682, __pyx_L1_error) __pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) { __Pyx_INCREF(__pyx_slice__15); __Pyx_GIVEREF(__pyx_slice__15); PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__15); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":683 * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True # <<<<<<<<<<<<<< * else: * result.append(slice(None)) */ __pyx_v_seen_ellipsis = 1; /* "View.MemoryView":681 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ goto __pyx_L7; } /* "View.MemoryView":685 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ /*else*/ { __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__15); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 685, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":686 * else: * result.append(slice(None)) * have_slices = True # <<<<<<<<<<<<<< * else: * if not isinstance(item, slice) and not PyIndex_Check(item): */ __pyx_v_have_slices = 1; /* "View.MemoryView":680 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ goto __pyx_L6; } /* "View.MemoryView":688 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ /*else*/ { __pyx_t_2 = PySlice_Check(__pyx_v_item); __pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0); __pyx_t_1 = __pyx_t_10; __pyx_L9_bool_binop_done:; if (unlikely(__pyx_t_1)) { /* "View.MemoryView":689 * else: * if not isinstance(item, slice) and not PyIndex_Check(item): * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<< * * have_slices = have_slices or isinstance(item, slice) */ __pyx_t_7 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 689, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 689, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_Raise(__pyx_t_11, 0, 0, 0); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __PYX_ERR(1, 689, __pyx_L1_error) /* "View.MemoryView":688 * have_slices = True * else: * if not 
isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ } /* "View.MemoryView":691 * raise TypeError("Cannot index with type '%s'" % type(item)) * * have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<< * result.append(item) * */ __pyx_t_10 = (__pyx_v_have_slices != 0); if (!__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = PySlice_Check(__pyx_v_item); __pyx_t_2 = (__pyx_t_10 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_have_slices = __pyx_t_1; /* "View.MemoryView":692 * * have_slices = have_slices or isinstance(item, slice) * result.append(item) # <<<<<<<<<<<<<< * * nslices = ndim - len(result) */ __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 692, __pyx_L1_error) } __pyx_L6:; /* "View.MemoryView":679 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":694 * result.append(item) * * nslices = ndim - len(result) # <<<<<<<<<<<<<< * if nslices: * result.extend([slice(None)] * nslices) */ __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 694, __pyx_L1_error) __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5); /* "View.MemoryView":695 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ __pyx_t_1 = (__pyx_v_nslices != 0); if (__pyx_t_1) { /* "View.MemoryView":696 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 
0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 696, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) { __Pyx_INCREF(__pyx_slice__15); __Pyx_GIVEREF(__pyx_slice__15); PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__15); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 696, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":695 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ } /* "View.MemoryView":698 * result.extend([slice(None)] * nslices) * * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): */ __Pyx_XDECREF(__pyx_r); if (!__pyx_v_have_slices) { } else { __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L14_bool_binop_done; } __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; __pyx_L14_bool_binop_done:; __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4); __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_r = ((PyObject*)__pyx_t_11); __pyx_t_11 = 0; goto __pyx_L0; /* "View.MemoryView":666 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_tup); __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_idx); __Pyx_XDECREF(__pyx_v_item); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":700 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assert_direct_dimensions", 0); /* "View.MemoryView":701 * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") */ __pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim); for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { __pyx_t_1 = __pyx_t_3; __pyx_v_suboffset = (__pyx_t_1[0]); /* "View.MemoryView":702 * cdef 
assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ __pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":703 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 703, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 703, __pyx_L1_error) /* "View.MemoryView":702 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ } } /* "View.MemoryView":700 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":710 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) { int __pyx_v_new_ndim; int __pyx_v_suboffset_dim; int __pyx_v_dim; __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; __Pyx_memviewslice *__pyx_v_p_src; struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0; __Pyx_memviewslice *__pyx_v_p_dst; int *__pyx_v_p_suboffset_dim; Py_ssize_t __pyx_v_start; Py_ssize_t __pyx_v_stop; Py_ssize_t __pyx_v_step; int __pyx_v_have_start; int __pyx_v_have_stop; int __pyx_v_have_step; PyObject *__pyx_v_index = NULL; struct __pyx_memoryview_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; struct __pyx_memoryview_obj *__pyx_t_4; char *__pyx_t_5; int __pyx_t_6; Py_ssize_t __pyx_t_7; PyObject *(*__pyx_t_8)(PyObject *); PyObject *__pyx_t_9 = NULL; Py_ssize_t __pyx_t_10; int __pyx_t_11; Py_ssize_t __pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memview_slice", 0); /* "View.MemoryView":711 * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<< * cdef bint negative_step * cdef __Pyx_memviewslice src, dst */ __pyx_v_new_ndim = 0; __pyx_v_suboffset_dim = -1; /* "View.MemoryView":718 * * * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<< * * cdef _memoryviewslice memviewsliceobj */ (void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst)))); /* "View.MemoryView":722 * cdef _memoryviewslice memviewsliceobj * * assert memview.view.ndim > 0 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if 
(unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(1, 722, __pyx_L1_error) } } #endif /* "View.MemoryView":724 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":725 * * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview # <<<<<<<<<<<<<< * p_src = &memviewsliceobj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 725, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":726 * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, &src) */ __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice); /* "View.MemoryView":724 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ goto __pyx_L3; } /* "View.MemoryView":728 * p_src = &memviewsliceobj.from_slice * else: * slice_copy(memview, &src) # <<<<<<<<<<<<<< * p_src = &src * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src)); /* "View.MemoryView":729 * else: * slice_copy(memview, &src) * p_src = &src # <<<<<<<<<<<<<< * * */ __pyx_v_p_src = (&__pyx_v_src); } __pyx_L3:; /* "View.MemoryView":735 * * * dst.memview = p_src.memview # <<<<<<<<<<<<<< * dst.data = p_src.data * */ __pyx_t_4 = __pyx_v_p_src->memview; __pyx_v_dst.memview = __pyx_t_4; /* "View.MemoryView":736 * * dst.memview = p_src.memview * dst.data = p_src.data # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_v_p_src->data; __pyx_v_dst.data = __pyx_t_5; /* "View.MemoryView":741 * * * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<< * cdef int *p_suboffset_dim = &suboffset_dim * cdef Py_ssize_t start, stop, step */ __pyx_v_p_dst = (&__pyx_v_dst); /* "View.MemoryView":742 * * cdef __Pyx_memviewslice *p_dst = &dst * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<< * cdef Py_ssize_t start, stop, step * cdef bint have_start, have_stop, have_step */ __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim); /* "View.MemoryView":746 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ __pyx_t_6 = 0; if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) { __pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0; __pyx_t_8 = NULL; } else { __pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 746, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_8)) { if (likely(PyList_CheckExact(__pyx_t_3))) { if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) 
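/* Note (reader-added commentary, not part of Cython's generated output):
 * the loop body below dispatches on the type of each index object, so that
 * e.g. m[1, None, ::2] is handled as:
 *   - an integer index drops that dimension (it is passed in the start
 *     position to slice_memviewslice with is_slice and all have_* flags 0),
 *   - None inserts a new dimension of extent 1 with stride 0,
 *   - a slice object contributes start/stop/step plus have_* flags and is
 *     processed with is_slice set.
 */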
#else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } else { if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } } else { __pyx_t_9 = __pyx_t_8(__pyx_t_3); if (unlikely(!__pyx_t_9)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 746, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_9); } __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9); __pyx_t_9 = 0; __pyx_v_dim = __pyx_t_6; __pyx_t_6 = (__pyx_t_6 + 1); /* "View.MemoryView":747 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ __pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0); if (__pyx_t_2) { /* "View.MemoryView":751 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, * index, 0, 0, # start, stop, step # <<<<<<<<<<<<<< * 0, 0, 0, # have_{start,stop,step} * False) */ __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 751, __pyx_L1_error) /* "View.MemoryView":748 * for dim, index in enumerate(indices): * if PyIndex_Check(index): * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 748, __pyx_L1_error) /* "View.MemoryView":747 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ goto __pyx_L6; } /* "View.MemoryView":754 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ __pyx_t_2 = (__pyx_v_index == Py_None); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":755 * False) * elif index is None: * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<< * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 */ (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1; /* "View.MemoryView":756 * elif index is None: * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<< * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 */ (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0; /* "View.MemoryView":757 * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<< * new_ndim += 1 * else: */ (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L; /* "View.MemoryView":758 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 # <<<<<<<<<<<<<< * else: * start = index.start or 0 */ __pyx_v_new_ndim = 
(__pyx_v_new_ndim + 1); /* "View.MemoryView":754 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ goto __pyx_L6; } /* "View.MemoryView":760 * new_ndim += 1 * else: * start = index.start or 0 # <<<<<<<<<<<<<< * stop = index.stop or 0 * step = index.step or 0 */ /*else*/ { __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 760, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 760, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 760, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L7_bool_binop_done; } __pyx_t_10 = 0; __pyx_L7_bool_binop_done:; __pyx_v_start = __pyx_t_10; /* "View.MemoryView":761 * else: * start = index.start or 0 * stop = index.stop or 0 # <<<<<<<<<<<<<< * step = index.step or 0 * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 761, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 761, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 761, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = 0; __pyx_L9_bool_binop_done:; __pyx_v_stop = __pyx_t_10; /* "View.MemoryView":762 * start = index.start or 0 * stop = index.stop or 0 * step = index.step or 0 # <<<<<<<<<<<<<< * * have_start = index.start is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 762, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 762, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 762, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = 0; __pyx_L11_bool_binop_done:; __pyx_v_step = __pyx_t_10; /* "View.MemoryView":764 * step = index.step or 0 * * have_start = index.start is not None # <<<<<<<<<<<<<< * have_stop = index.stop is not None * have_step = index.step is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 764, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_start = __pyx_t_1; /* "View.MemoryView":765 * * have_start = index.start is not None * have_stop = index.stop is not None # <<<<<<<<<<<<<< * have_step = index.step is not None * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 765, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_stop = __pyx_t_1; /* "View.MemoryView":766 * have_start = index.start is not None * 
have_stop = index.stop is not None * have_step = index.step is not None # <<<<<<<<<<<<<< * * slice_memviewslice( */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 766, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_step = __pyx_t_1; /* "View.MemoryView":768 * have_step = index.step is not None * * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 768, __pyx_L1_error) /* "View.MemoryView":774 * have_start, have_stop, have_step, * True) * new_ndim += 1 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); } __pyx_L6:; /* "View.MemoryView":746 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":776 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":777 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":778 * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<< * memviewsliceobj.to_dtype_func, * memview.dtype_is_object) */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 778, __pyx_L1_error) } /* "View.MemoryView":779 * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<< * memview.dtype_is_object) * else: */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 779, __pyx_L1_error) } /* "View.MemoryView":777 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 777, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 777, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":776 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ } /* 
"View.MemoryView":782 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ /*else*/ { __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":783 * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 782, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); /* "View.MemoryView":782 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 782, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":710 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj); __Pyx_XDECREF(__pyx_v_index); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":807 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) { Py_ssize_t __pyx_v_new_shape; int __pyx_v_negative_step; int __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":827 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":829 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ __pyx_t_1 = ((__pyx_v_start < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":830 * * if start < 0: * start += shape # <<<<<<<<<<<<<< * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":829 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ } /* "View.MemoryView":831 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ __pyx_t_1 = (0 <= __pyx_v_start); if (__pyx_t_1) { __pyx_t_1 = (__pyx_v_start < __pyx_v_shape); } __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":832 * start += shape * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<< * else: 
* */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 832, __pyx_L1_error) /* "View.MemoryView":831 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ } /* "View.MemoryView":827 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ goto __pyx_L3; } /* "View.MemoryView":835 * else: * * negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<< * * if have_step and step == 0: */ /*else*/ { __pyx_t_1 = ((__pyx_v_have_step != 0) != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L6_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step < 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L6_bool_binop_done:; __pyx_v_negative_step = __pyx_t_2; /* "View.MemoryView":837 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ __pyx_t_1 = (__pyx_v_have_step != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L9_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step == 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L9_bool_binop_done:; if (__pyx_t_2) { /* "View.MemoryView":838 * * if have_step and step == 0: * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 838, __pyx_L1_error) /* "View.MemoryView":837 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ } /* "View.MemoryView":841 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ __pyx_t_2 = (__pyx_v_have_start != 0); if (__pyx_t_2) { /* "View.MemoryView":842 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":843 * if have_start: * if start < 0: * start += shape # <<<<<<<<<<<<<< * if start < 0: * start = 0 */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":844 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":845 * start += shape * if start < 0: * start = 0 # <<<<<<<<<<<<<< * elif start >= shape: * if negative_step: */ __pyx_v_start = 0; /* "View.MemoryView":844 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ } /* "View.MemoryView":842 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ goto __pyx_L12; } /* "View.MemoryView":846 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":847 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":848 * elif start >= shape: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = shape */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":847 * start = 0 * elif start >= shape: 
* if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L14; } /* "View.MemoryView":850 * start = shape - 1 * else: * start = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ /*else*/ { __pyx_v_start = __pyx_v_shape; } __pyx_L14:; /* "View.MemoryView":846 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ } __pyx_L12:; /* "View.MemoryView":841 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ goto __pyx_L11; } /* "View.MemoryView":852 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":853 * else: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = 0 */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":852 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L15; } /* "View.MemoryView":855 * start = shape - 1 * else: * start = 0 # <<<<<<<<<<<<<< * * if have_stop: */ /*else*/ { __pyx_v_start = 0; } __pyx_L15:; } __pyx_L11:; /* "View.MemoryView":857 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ __pyx_t_2 = (__pyx_v_have_stop != 0); if (__pyx_t_2) { /* "View.MemoryView":858 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":859 * if have_stop: * if stop < 0: * stop += shape # <<<<<<<<<<<<<< * if stop < 0: * stop = 0 */ __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape); /* "View.MemoryView":860 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":861 * stop += shape * if stop < 0: * stop = 0 # <<<<<<<<<<<<<< * elif stop > shape: * stop = shape */ __pyx_v_stop = 0; /* "View.MemoryView":860 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ } /* "View.MemoryView":858 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ goto __pyx_L17; } /* "View.MemoryView":862 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":863 * stop = 0 * elif stop > shape: * stop = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ __pyx_v_stop = __pyx_v_shape; /* "View.MemoryView":862 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ } __pyx_L17:; /* "View.MemoryView":857 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ goto __pyx_L16; } /* "View.MemoryView":865 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":866 * else: * if negative_step: * stop = -1 # <<<<<<<<<<<<<< * else: * stop = shape */ __pyx_v_stop = -1L; /* "View.MemoryView":865 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ goto __pyx_L19; } /* "View.MemoryView":868 * stop = -1 * else: * stop = shape # <<<<<<<<<<<<<< * * if not have_step: */ /*else*/ { __pyx_v_stop = __pyx_v_shape; } __pyx_L19:; } __pyx_L16:; /* "View.MemoryView":870 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0); 
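/* Note (reader-added commentary, not part of Cython's generated output):
 * the branches above clamp start/stop with the same rules as Python's
 * slice.indices(length), with defaults depending on the sign of the step;
 * e.g. m[::-1] on an axis of extent n resolves to
 *
 *     start = n - 1;  stop = -1;  step = -1;
 *
 * so the extent computed below, ceil((stop - start) / step), is again n.
 */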
if (__pyx_t_2) { /* "View.MemoryView":871 * * if not have_step: * step = 1 # <<<<<<<<<<<<<< * * */ __pyx_v_step = 1; /* "View.MemoryView":870 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ } /* "View.MemoryView":875 * * with cython.cdivision(True): * new_shape = (stop - start) // step # <<<<<<<<<<<<<< * * if (stop - start) - step * new_shape: */ __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step); /* "View.MemoryView":877 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0); if (__pyx_t_2) { /* "View.MemoryView":878 * * if (stop - start) - step * new_shape: * new_shape += 1 # <<<<<<<<<<<<<< * * if new_shape < 0: */ __pyx_v_new_shape = (__pyx_v_new_shape + 1); /* "View.MemoryView":877 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ } /* "View.MemoryView":880 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":881 * * if new_shape < 0: * new_shape = 0 # <<<<<<<<<<<<<< * * */ __pyx_v_new_shape = 0; /* "View.MemoryView":880 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ } /* "View.MemoryView":884 * * * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<< * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset */ (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step); /* "View.MemoryView":885 * * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<< * dst.suboffsets[new_ndim] = suboffset * */ (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape; /* "View.MemoryView":886 * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<< * * */ (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset; } __pyx_L3:; /* "View.MemoryView":889 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ __pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":890 * * if suboffset_dim[0] < 0: * dst.data += start * stride # <<<<<<<<<<<<<< * else: * dst.suboffsets[suboffset_dim[0]] += start * stride */ __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride)); /* "View.MemoryView":889 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ goto __pyx_L23; } /* "View.MemoryView":892 * dst.data += start * stride * else: * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<< * * if suboffset >= 0: */ /*else*/ { __pyx_t_3 = (__pyx_v_suboffset_dim[0]); (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride)); } __pyx_L23:; /* "View.MemoryView":894 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":895 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":896 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] 
+ suboffset * else: */ __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":897 * if not is_slice: * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<< * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " */ __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset); /* "View.MemoryView":896 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ goto __pyx_L26; } /* "View.MemoryView":899 * dst.data = (<char **> dst.data)[0] + suboffset * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<< * "must be indexed and not sliced", dim) * else: */ /*else*/ { /* "View.MemoryView":900 * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " * "must be indexed and not sliced", dim) # <<<<<<<<<<<<<< * else: * suboffset_dim[0] = new_ndim */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 899, __pyx_L1_error) } __pyx_L26:; /* "View.MemoryView":895 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ goto __pyx_L25; } /* "View.MemoryView":902 * "must be indexed and not sliced", dim) * else: * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<< * * return 0 */ /*else*/ { (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim; } __pyx_L25:; /* "View.MemoryView":894 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ } /* "View.MemoryView":904 * suboffset_dim[0] = new_ndim * * return 0 # <<<<<<<<<<<<<< * * */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":807 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":910 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) { Py_ssize_t __pyx_v_shape; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_suboffset; Py_ssize_t __pyx_v_itemsize; char *__pyx_v_resultp; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("pybuffer_index", 0); /* "View.MemoryView":912 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<< * cdef Py_ssize_t itemsize = view.itemsize * cdef char *resultp */ __pyx_v_suboffset = -1L; /* "View.MemoryView":913 * Py_ssize_t dim) except 
NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<< * cdef char *resultp * */ __pyx_t_1 = __pyx_v_view->itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":916 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":917 * * if view.ndim == 0: * shape = view.len / itemsize # <<<<<<<<<<<<<< * stride = itemsize * else: */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 917, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 917, __pyx_L1_error) } __pyx_v_shape = (__pyx_v_view->len / __pyx_v_itemsize); /* "View.MemoryView":918 * if view.ndim == 0: * shape = view.len / itemsize * stride = itemsize # <<<<<<<<<<<<<< * else: * shape = view.shape[dim] */ __pyx_v_stride = __pyx_v_itemsize; /* "View.MemoryView":916 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ goto __pyx_L3; } /* "View.MemoryView":920 * stride = itemsize * else: * shape = view.shape[dim] # <<<<<<<<<<<<<< * stride = view.strides[dim] * if view.suboffsets != NULL: */ /*else*/ { __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]); /* "View.MemoryView":921 * else: * shape = view.shape[dim] * stride = view.strides[dim] # <<<<<<<<<<<<<< * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] */ __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]); /* "View.MemoryView":922 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":923 * stride = view.strides[dim] * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<< * * if index < 0: */ __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]); /* "View.MemoryView":922 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ } } __pyx_L3:; /* "View.MemoryView":925 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":926 * * if index < 0: * index += view.shape[dim] # <<<<<<<<<<<<<< * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) */ __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim])); /* "View.MemoryView":927 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":928 * index += view.shape[dim] * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * if index >= shape: */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); 
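/* Note (reader-added commentary): the wraparound above implements Python's
 * negative indexing, so index -1 on an axis of extent 5 becomes 4 before
 * either bounds check can raise IndexError. */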
if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 928, __pyx_L1_error) /* "View.MemoryView":927 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":925 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ } /* "View.MemoryView":930 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":931 * * if index >= shape: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * resultp = bufp + index * stride */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 931, __pyx_L1_error) /* "View.MemoryView":930 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":933 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * resultp = bufp + index * stride # <<<<<<<<<<<<<< * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset */ __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride)); /* "View.MemoryView":934 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":935 * resultp = bufp + index * stride * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<< * * return resultp */ __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset); /* "View.MemoryView":934 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ } /* "View.MemoryView":937 * resultp = (<char **> resultp)[0] + suboffset * * return resultp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_resultp; goto __pyx_L0; /* "View.MemoryView":910 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename); 
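/* Note (reader-added commentary, not part of Cython's generated output):
 * the successful path above computes the element address with plain
 * pointer arithmetic,
 *
 *     resultp = bufp + index * stride;
 *     if (suboffset >= 0) resultp = ((char **)resultp)[0] + suboffset;
 *
 * where the second line follows one extra indirection for PIL-style
 * indirect dimensions.
 */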
__pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":943 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) { int __pyx_v_ndim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; int __pyx_v_i; int __pyx_v_j; int __pyx_r; int __pyx_t_1; Py_ssize_t *__pyx_t_2; long __pyx_t_3; long __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":944 * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<< * * cdef Py_ssize_t *shape = memslice.shape */ __pyx_t_1 = __pyx_v_memslice->memview->view.ndim; __pyx_v_ndim = __pyx_t_1; /* "View.MemoryView":946 * cdef int ndim = memslice.memview.view.ndim * * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<< * cdef Py_ssize_t *strides = memslice.strides * */ __pyx_t_2 = __pyx_v_memslice->shape; __pyx_v_shape = __pyx_t_2; /* "View.MemoryView":947 * * cdef Py_ssize_t *shape = memslice.shape * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_v_memslice->strides; __pyx_v_strides = __pyx_t_2; /* "View.MemoryView":951 * * cdef int i, j * for i in range(ndim / 2): # <<<<<<<<<<<<<< * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] */ __pyx_t_3 = (__pyx_v_ndim / 2); __pyx_t_4 = __pyx_t_3; for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":952 * cdef int i, j * for i in range(ndim / 2): * j = ndim - 1 - i # <<<<<<<<<<<<<< * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] */ __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i); /* "View.MemoryView":953 * for i in range(ndim / 2): * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<< * shape[i], shape[j] = shape[j], shape[i] * */ __pyx_t_5 = (__pyx_v_strides[__pyx_v_j]); __pyx_t_6 = (__pyx_v_strides[__pyx_v_i]); (__pyx_v_strides[__pyx_v_i]) = __pyx_t_5; (__pyx_v_strides[__pyx_v_j]) = __pyx_t_6; /* "View.MemoryView":954 * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<< * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: */ __pyx_t_6 = (__pyx_v_shape[__pyx_v_j]); __pyx_t_5 = (__pyx_v_shape[__pyx_v_i]); (__pyx_v_shape[__pyx_v_i]) = __pyx_t_6; (__pyx_v_shape[__pyx_v_j]) = __pyx_t_5; /* "View.MemoryView":956 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0); if (!__pyx_t_8) { } else { __pyx_t_7 = __pyx_t_8; goto __pyx_L6_bool_binop_done; } __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0); __pyx_t_7 = __pyx_t_8; __pyx_L6_bool_binop_done:; if (__pyx_t_7) { /* "View.MemoryView":957 * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<< * * return 1 */ __pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, 
((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 957, __pyx_L1_error) /* "View.MemoryView":956 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ } } /* "View.MemoryView":959 * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * * return 1 # <<<<<<<<<<<<<< * * */ __pyx_r = 1; goto __pyx_L0; /* "View.MemoryView":943 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":976 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* Python wrapper */ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":977 * * def __dealloc__(self): * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1); /* "View.MemoryView":976 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":979 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":980 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":981 * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: * return self.to_object_func(itemp) # <<<<<<<<<<<<<< * else: * return memoryview.convert_item_to_object(self, itemp) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) 
__PYX_ERR(1, 981, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":980 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ } /* "View.MemoryView":983 * return self.to_object_func(itemp) * else: * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 983, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; } /* "View.MemoryView":979 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":985 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":986 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":987 * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<< * else: * memoryview.assign_item_from_object(self, itemp, value) */ __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 987, __pyx_L1_error) /* "View.MemoryView":986 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ goto __pyx_L3; } /* "View.MemoryView":989 * self.to_dtype_func(itemp, value) * else: * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<< * * @property */ /*else*/ { __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 989, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L3:; /* "View.MemoryView":985 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; 
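/* Note (reader-added commentary, not part of Cython's generated output):
 * convert_item_to_object and assign_item_from_object are the unpack/pack
 * hooks of a _memoryviewslice; they delegate to the converters captured at
 * slicing time, roughly
 *
 *     PyObject *(*to_object_func)(char *);       read an item as an object
 *     int (*to_dtype_func)(char *, PyObject *);  write an object into an item
 *
 * and fall back to the base memoryview implementation when either pointer
 * is NULL.
 */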
__pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":992 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":993 * @property * def base(self): * return self.from_object # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->from_object); __pyx_r = __pyx_v_self->from_object; goto __pyx_L0; /* "View.MemoryView":992 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ 
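/* Note (reader-added commentary): __reduce_cython__ above and
 * __setstate_cython__ below raise unconditionally, because a
 * _memoryviewslice wraps C-level state established in a non-trivial
 * __cinit__ and therefore cannot be pickled. */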
__pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":999 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_TypeInfo *__pyx_t_4; Py_buffer __pyx_t_5; Py_ssize_t *__pyx_t_6; Py_ssize_t *__pyx_t_7; Py_ssize_t *__pyx_t_8; Py_ssize_t __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_fromslice", 0); /* "View.MemoryView":1007 * cdef 
_memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ __pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0); if (__pyx_t_1) { /* "View.MemoryView":1008 * * if <PyObject *> memviewslice.memview == Py_None: * return None # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; /* "View.MemoryView":1007 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ } /* "View.MemoryView":1013 * * * result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<< * * result.from_slice = memviewslice */ __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None); __Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1015 * result = _memoryviewslice(None, 0, dtype_is_object) * * result.from_slice = memviewslice # <<<<<<<<<<<<<< * __PYX_INC_MEMVIEW(&memviewslice, 1) * */ __pyx_v_result->from_slice = __pyx_v_memviewslice; /* "View.MemoryView":1016 * * result.from_slice = memviewslice * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<< * * result.from_object = (<memoryview> memviewslice.memview).base */ __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1); /* "View.MemoryView":1018 * __PYX_INC_MEMVIEW(&memviewslice, 1) * * result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<< * result.typeinfo = memviewslice.memview.typeinfo * */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1018, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __Pyx_GOTREF(__pyx_v_result->from_object); __Pyx_DECREF(__pyx_v_result->from_object); __pyx_v_result->from_object = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":1019 * * result.from_object = (<memoryview> memviewslice.memview).base * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<< * * result.view = memviewslice.memview.view */ __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo; __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4; /* "View.MemoryView":1021 * result.typeinfo = memviewslice.memview.typeinfo * * result.view = memviewslice.memview.view # <<<<<<<<<<<<<< * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim */ __pyx_t_5 = __pyx_v_memviewslice.memview->view; __pyx_v_result->__pyx_base.view = __pyx_t_5; /* "View.MemoryView":1022 * * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<< * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None */ __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data); /* "View.MemoryView":1023 * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data 
* result.view.ndim = ndim # <<<<<<<<<<<<<< * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim; /* "View.MemoryView":1024 * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None; /* "View.MemoryView":1025 * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: */ Py_INCREF(Py_None); /* "View.MemoryView":1027 * Py_INCREF(Py_None) * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< * result.flags = PyBUF_RECORDS * else: */ __pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0); if (__pyx_t_1) { /* "View.MemoryView":1028 * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<< * else: * result.flags = PyBUF_RECORDS_RO */ __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS; /* "View.MemoryView":1027 * Py_INCREF(Py_None) * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< * result.flags = PyBUF_RECORDS * else: */ goto __pyx_L4; } /* "View.MemoryView":1030 * result.flags = PyBUF_RECORDS * else: * result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<< * * result.view.shape = <Py_ssize_t *> result.from_slice.shape */ /*else*/ { __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO; } __pyx_L4:; /* "View.MemoryView":1032 * result.flags = PyBUF_RECORDS_RO * * result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<< * result.view.strides = <Py_ssize_t *> result.from_slice.strides * */ __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape); /* "View.MemoryView":1033 * * result.view.shape = <Py_ssize_t *> result.from_slice.shape * result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<< * * */ __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides); /* "View.MemoryView":1036 * * * result.view.suboffsets = NULL # <<<<<<<<<<<<<< * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: */ __pyx_v_result->__pyx_base.view.suboffsets = NULL; /* "View.MemoryView":1037 * * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets */ __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_v_suboffset = (__pyx_t_6[0]); /* "View.MemoryView":1038 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ __pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1039 * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<< * break * */ __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets); /* "View.MemoryView":1040 * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> 
result.from_slice.suboffsets * break # <<<<<<<<<<<<<< * * result.view.len = result.view.itemsize */ goto __pyx_L6_break; /* "View.MemoryView":1038 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ } } __pyx_L6_break:; /* "View.MemoryView":1042 * break * * result.view.len = result.view.itemsize # <<<<<<<<<<<<<< * for length in result.view.shape[:ndim]: * result.view.len *= length */ __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; /* "View.MemoryView":1043 * * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<< * result.view.len *= length * */ __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1043, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1044 * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: * result.view.len *= length # <<<<<<<<<<<<<< * * result.to_object_func = to_object_func */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1044, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1044, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1044, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; } /* "View.MemoryView":1046 * result.view.len *= length * * result.to_object_func = to_object_func # <<<<<<<<<<<<<< * result.to_dtype_func = to_dtype_func * */ __pyx_v_result->to_object_func = __pyx_v_to_object_func; /* "View.MemoryView":1047 * * result.to_object_func = to_object_func * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; /* "View.MemoryView":1049 * result.to_dtype_func = to_dtype_func * * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_get_slice_from_memoryview') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":999 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1052 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj */ static __Pyx_memviewslice 
*__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) { struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0; __Pyx_memviewslice *__pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_slice_from_memview", 0); /* "View.MemoryView":1055 * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1056 * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): * obj = memview # <<<<<<<<<<<<<< * return &obj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1056, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":1057 * if isinstance(memview, _memoryviewslice): * obj = memview * return &obj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, mslice) */ __pyx_r = (&__pyx_v_obj->from_slice); goto __pyx_L0; /* "View.MemoryView":1055 * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ } /* "View.MemoryView":1059 * return &obj.from_slice * else: * slice_copy(memview, mslice) # <<<<<<<<<<<<<< * return mslice * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); /* "View.MemoryView":1060 * else: * slice_copy(memview, mslice) * return mslice # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_slice_copy') */ __pyx_r = __pyx_v_mslice; goto __pyx_L0; } /* "View.MemoryView":1052 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_obj); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1063 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { int __pyx_v_dim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; Py_ssize_t *__pyx_v_suboffsets; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; Py_ssize_t __pyx_t_5; __Pyx_RefNannySetupContext("slice_copy", 0); /* "View.MemoryView":1067 * cdef (Py_ssize_t*) shape, strides, suboffsets * * shape = memview.view.shape # <<<<<<<<<<<<<< * strides = memview.view.strides * suboffsets = memview.view.suboffsets */ __pyx_t_1 = __pyx_v_memview->view.shape; __pyx_v_shape = __pyx_t_1; /* "View.MemoryView":1068 * * shape = memview.view.shape * strides = 
memview.view.strides # <<<<<<<<<<<<<< * suboffsets = memview.view.suboffsets * */ __pyx_t_1 = __pyx_v_memview->view.strides; __pyx_v_strides = __pyx_t_1; /* "View.MemoryView":1069 * shape = memview.view.shape * strides = memview.view.strides * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< * * dst.memview = <__pyx_memoryview *> memview */ __pyx_t_1 = __pyx_v_memview->view.suboffsets; __pyx_v_suboffsets = __pyx_t_1; /* "View.MemoryView":1071 * suboffsets = memview.view.suboffsets * * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< * dst.data = <char *> memview.view.buf * */ __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); /* "View.MemoryView":1072 * * dst.memview = <__pyx_memoryview *> memview * dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<< * * for dim in range(memview.view.ndim): */ __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf); /* "View.MemoryView":1074 * dst.data = <char *> memview.view.buf * * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] */ __pyx_t_2 = __pyx_v_memview->view.ndim; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_dim = __pyx_t_4; /* "View.MemoryView":1075 * * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<< * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 */ (__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]); /* "View.MemoryView":1076 * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<< * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 * */ (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]); /* "View.MemoryView":1077 * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object') */ if ((__pyx_v_suboffsets != 0)) { __pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]); } else { __pyx_t_5 = -1L; } (__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5; } /* "View.MemoryView":1063 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1080 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) { __Pyx_memviewslice __pyx_v_memviewslice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_copy", 0); /* "View.MemoryView":1083 * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<< * return memoryview_copy_from_slice(memview, &memviewslice) * */ __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice)); /* "View.MemoryView":1084 * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object_from_slice') */ 
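/* [annotation, not Cython output] memoryview_copy snapshots the Python-level
 * view into a stack-allocated C slice and then re-wraps it as a new object.
 * A minimal sketch of the call sequence, assuming `mv` names the source
 * memoryview:
 *
 *     __Pyx_memviewslice tmp;
 *     __pyx_memoryview_slice_copy(mv, &tmp);        // Python view -> C slice
 *     return __pyx_memoryview_copy_object_from_slice(mv, &tmp);
 *
 * slice_copy (defined just above) copies view.buf plus the per-dimension
 * shape/strides/suboffsets arrays; copy_object_from_slice (defined below)
 * forwards the dtype conversion callbacks before calling
 * memoryview_fromslice. */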
__Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1084, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":1080 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1087 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. */ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) { PyObject *(*__pyx_v_to_object_func)(char *); int (*__pyx_v_to_dtype_func)(char *, PyObject *); PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *(*__pyx_t_3)(char *); int (*__pyx_t_4)(char *, PyObject *); PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0); /* "View.MemoryView":1094 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1095 * * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<< * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: */ __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func; __pyx_v_to_object_func = __pyx_t_3; /* "View.MemoryView":1096 * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<< * else: * to_object_func = NULL */ __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func; __pyx_v_to_dtype_func = __pyx_t_4; /* "View.MemoryView":1094 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ goto __pyx_L3; } /* "View.MemoryView":1098 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: * to_object_func = NULL # <<<<<<<<<<<<<< * to_dtype_func = NULL * */ /*else*/ { __pyx_v_to_object_func = NULL; /* "View.MemoryView":1099 * else: * to_object_func = NULL * to_dtype_func = NULL # <<<<<<<<<<<<<< * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, */ __pyx_v_to_dtype_func = NULL; } __pyx_L3:; /* "View.MemoryView":1101 * to_dtype_func = NULL * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<< * to_object_func, to_dtype_func, * 
memview.dtype_is_object) */ __Pyx_XDECREF(__pyx_r); /* "View.MemoryView":1103 * return memoryview_fromslice(memviewslice[0], memview.view.ndim, * to_object_func, to_dtype_func, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1101, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":1087 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1109 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) { Py_ssize_t __pyx_r; int __pyx_t_1; /* "View.MemoryView":1110 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ __pyx_t_1 = ((__pyx_v_arg < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1111 * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: * return -arg # <<<<<<<<<<<<<< * else: * return arg */ __pyx_r = (-__pyx_v_arg); goto __pyx_L0; /* "View.MemoryView":1110 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ } /* "View.MemoryView":1113 * return -arg * else: * return arg # <<<<<<<<<<<<<< * * @cname('__pyx_get_best_slice_order') */ /*else*/ { __pyx_r = __pyx_v_arg; goto __pyx_L0; } /* "View.MemoryView":1109 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1116 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. 
*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_c_stride; Py_ssize_t __pyx_v_f_stride; char __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; /* "View.MemoryView":1121 * """ * cdef int i * cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<< * cdef Py_ssize_t f_stride = 0 * */ __pyx_v_c_stride = 0; /* "View.MemoryView":1122 * cdef int i * cdef Py_ssize_t c_stride = 0 * cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_f_stride = 0; /* "View.MemoryView":1124 * cdef Py_ssize_t f_stride = 0 * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1125 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1126 * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1127 * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * for i in range(ndim): */ goto __pyx_L4_break; /* "View.MemoryView":1125 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ } } __pyx_L4_break:; /* "View.MemoryView":1129 * break * * for i in range(ndim): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] */ __pyx_t_1 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_1; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1130 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1131 * for i in range(ndim): * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1132 * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): */ goto __pyx_L7_break; /* "View.MemoryView":1130 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ } } __pyx_L7_break:; /* "View.MemoryView":1134 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ __pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1135 * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): * return 'C' # <<<<<<<<<<<<<< * else: * return 'F' */ __pyx_r = 'C'; goto __pyx_L0; /* "View.MemoryView":1134 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ } /* "View.MemoryView":1137 * return 'C' * else: * return 'F' # <<<<<<<<<<<<<< * * @cython.cdivision(True) */ /*else*/ { __pyx_r = 'F'; goto __pyx_L0; } /* "View.MemoryView":1116 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given 
slice. */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1140 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent; Py_ssize_t __pyx_v_dst_extent; Py_ssize_t __pyx_v_src_stride; Py_ssize_t __pyx_v_dst_stride; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; /* "View.MemoryView":1147 * * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] */ __pyx_v_src_extent = (__pyx_v_src_shape[0]); /* "View.MemoryView":1148 * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] */ __pyx_v_dst_extent = (__pyx_v_dst_shape[0]); /* "View.MemoryView":1149 * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_stride = dst_strides[0] * */ __pyx_v_src_stride = (__pyx_v_src_strides[0]); /* "View.MemoryView":1150 * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_dst_stride = (__pyx_v_dst_strides[0]); /* "View.MemoryView":1152 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1153 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ __pyx_t_2 = ((__pyx_v_src_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } /* "View.MemoryView":1154 * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize * dst_extent) * else: */ __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize); if (__pyx_t_2) { __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride)); } __pyx_t_3 = (__pyx_t_2 != 0); __pyx_t_1 = __pyx_t_3; __pyx_L5_bool_binop_done:; /* "View.MemoryView":1153 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ if (__pyx_t_1) { /* "View.MemoryView":1155 * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<< * else: * for i 
in range(dst_extent): */ (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent))); /* "View.MemoryView":1153 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ goto __pyx_L4; } /* "View.MemoryView":1157 * memcpy(dst_data, src_data, itemsize * dst_extent) * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize) * src_data += src_stride */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1158 * else: * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<< * src_data += src_stride * dst_data += dst_stride */ (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize)); /* "View.MemoryView":1159 * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * else: */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1160 * memcpy(dst_data, src_data, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L4:; /* "View.MemoryView":1152 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ goto __pyx_L3; } /* "View.MemoryView":1162 * dst_data += dst_stride * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * _copy_strided_to_strided(src_data, src_strides + 1, * dst_data, dst_strides + 1, */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1163 * else: * for i in range(dst_extent): * _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<< * dst_data, dst_strides + 1, * src_shape + 1, dst_shape + 1, */ _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize); /* "View.MemoryView":1167 * src_shape + 1, dst_shape + 1, * ndim - 1, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1168 * ndim - 1, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L3:; /* "View.MemoryView":1140 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ /* function exit code */ } /* "View.MemoryView":1170 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) { /* "View.MemoryView":1173 * __Pyx_memviewslice *dst, * int ndim, size_t 
itemsize) nogil: * _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<< * src.shape, dst.shape, ndim, itemsize) * */ _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1170 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ /* function exit code */ } /* "View.MemoryView":1177 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef Py_ssize_t shape, size = src.memview.view.itemsize */ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) { Py_ssize_t __pyx_v_shape; Py_ssize_t __pyx_v_size; Py_ssize_t __pyx_r; Py_ssize_t __pyx_t_1; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; /* "View.MemoryView":1179 * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: * "Return the size of the memory occupied by the slice in number of bytes" * cdef Py_ssize_t shape, size = src.memview.view.itemsize # <<<<<<<<<<<<<< * * for shape in src.shape[:ndim]: */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_size = __pyx_t_1; /* "View.MemoryView":1181 * cdef Py_ssize_t shape, size = src.memview.view.itemsize * * for shape in src.shape[:ndim]: # <<<<<<<<<<<<<< * size *= shape * */ __pyx_t_3 = (__pyx_v_src->shape + __pyx_v_ndim); for (__pyx_t_4 = __pyx_v_src->shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { __pyx_t_2 = __pyx_t_4; __pyx_v_shape = (__pyx_t_2[0]); /* "View.MemoryView":1182 * * for shape in src.shape[:ndim]: * size *= shape # <<<<<<<<<<<<<< * * return size */ __pyx_v_size = (__pyx_v_size * __pyx_v_shape); } /* "View.MemoryView":1184 * size *= shape * * return size # <<<<<<<<<<<<<< * * @cname('__pyx_fill_contig_strides_array') */ __pyx_r = __pyx_v_size; goto __pyx_L0; /* "View.MemoryView":1177 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef Py_ssize_t shape, size = src.memview.view.itemsize */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1187 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) { int __pyx_v_idx; Py_ssize_t __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; /* "View.MemoryView":1196 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ __pyx_t_1 = ((__pyx_v_order == 'F') != 0); if (__pyx_t_1) { /* "View.MemoryView":1197 * * if order == 'F': * for idx in range(ndim): # <<<<<<<<<<<<<< * strides[idx] = stride * stride *= shape[idx] */ __pyx_t_2 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_idx = __pyx_t_4; /* "View.MemoryView":1198 * if order == 'F': * for idx in range(ndim): * strides[idx] = 
stride # <<<<<<<<<<<<<< * stride *= shape[idx] * else: */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1199 * for idx in range(ndim): * strides[idx] = stride * stride *= shape[idx] # <<<<<<<<<<<<<< * else: * for idx in range(ndim - 1, -1, -1): */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } /* "View.MemoryView":1196 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ goto __pyx_L3; } /* "View.MemoryView":1201 * stride *= shape[idx] * else: * for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * strides[idx] = stride * stride *= shape[idx] */ /*else*/ { for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) { __pyx_v_idx = __pyx_t_2; /* "View.MemoryView":1202 * else: * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride # <<<<<<<<<<<<<< * stride *= shape[idx] * */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1203 * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride * stride *= shape[idx] # <<<<<<<<<<<<<< * * return stride */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } } __pyx_L3:; /* "View.MemoryView":1205 * stride *= shape[idx] * * return stride # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_data_to_temp') */ __pyx_r = __pyx_v_stride; goto __pyx_L0; /* "View.MemoryView":1187 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1208 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) { int __pyx_v_i; void *__pyx_v_result; size_t __pyx_v_itemsize; size_t __pyx_v_size; void *__pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; struct __pyx_memoryview_obj *__pyx_t_4; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":1219 * cdef void *result * * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef size_t size = slice_get_size(src, ndim) * */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1220 * * cdef size_t itemsize = src.memview.view.itemsize * cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<< * * result = malloc(size) */ __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim); /* "View.MemoryView":1222 * cdef size_t size = slice_get_size(src, ndim) * * result = malloc(size) # <<<<<<<<<<<<<< * if not result: * _err(MemoryError, NULL) */ __pyx_v_result = malloc(__pyx_v_size); /* "View.MemoryView":1223 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1224 * result = malloc(size) * if not result: * _err(MemoryError, NULL) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 1224, __pyx_L1_error) /* "View.MemoryView":1223 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ } /* 
"View.MemoryView":1227 * * * tmpslice.data = <char *> result # <<<<<<<<<<<<<< * tmpslice.memview = src.memview * for i in range(ndim): */ __pyx_v_tmpslice->data = ((char *)__pyx_v_result); /* "View.MemoryView":1228 * * tmpslice.data = <char *> result * tmpslice.memview = src.memview # <<<<<<<<<<<<<< * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] */ __pyx_t_4 = __pyx_v_src->memview; __pyx_v_tmpslice->memview = __pyx_t_4; /* "View.MemoryView":1229 * tmpslice.data = <char *> result * tmpslice.memview = src.memview * for i in range(ndim): # <<<<<<<<<<<<<< * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 */ __pyx_t_3 = __pyx_v_ndim; __pyx_t_5 = __pyx_t_3; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1230 * tmpslice.memview = src.memview * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<< * tmpslice.suboffsets[i] = -1 * */ (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]); /* "View.MemoryView":1231 * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, */ (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1233 * tmpslice.suboffsets[i] = -1 * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<< * ndim, order) * */ (void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order)); /* "View.MemoryView":1237 * * * for i in range(ndim): # <<<<<<<<<<<<<< * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 */ __pyx_t_3 = __pyx_v_ndim; __pyx_t_5 = __pyx_t_3; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1238 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ __pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1239 * for i in range(ndim): * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 # <<<<<<<<<<<<<< * * if slice_is_contig(src[0], order, ndim): */ (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0; /* "View.MemoryView":1238 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ } } /* "View.MemoryView":1241 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ __pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1242 * * if slice_is_contig(src[0], order, ndim): * memcpy(result, src.data, size) # <<<<<<<<<<<<<< * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) */ (void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size)); /* "View.MemoryView":1241 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ goto __pyx_L9; } /* "View.MemoryView":1244 * memcpy(result, src.data, size) * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<< * * return result */ /*else*/ { copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize); } __pyx_L9:; /* "View.MemoryView":1246 * copy_strided_to_strided(src, tmpslice, ndim, itemsize) * * return result # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_result; 
goto __pyx_L0; /* "View.MemoryView":1208 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = NULL; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1251 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_extents", 0); /* "View.MemoryView":1254 * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % * (i, extent1, extent2)) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err_dim') */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_3 = 0; /* "View.MemoryView":1253 * cdef int _err_extents(int i, Py_ssize_t extent1, * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<< * (i, extent1, extent2)) * */ __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1253, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1253, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(1, 1253, __pyx_L1_error) /* "View.MemoryView":1251 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, 
__pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1257 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_dim", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1258 * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: * raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err') */ __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_INCREF(__pyx_v_error); __pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_2) ? 
__Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 1258, __pyx_L1_error) /* "View.MemoryView":1257 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1261 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1262 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ __pyx_t_1 = ((__pyx_v_msg != NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":1263 * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: * raise error(msg.decode('ascii')) # <<<<<<<<<<<<<< * else: * raise error */ __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1263, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_error); __pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_2 = (__pyx_t_5) ? 
__Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1263, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 1263, __pyx_L1_error) /* "View.MemoryView":1262 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ } /* "View.MemoryView":1265 * raise error(msg.decode('ascii')) * else: * raise error # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_contents') */ /*else*/ { __Pyx_Raise(__pyx_v_error, 0, 0, 0); __PYX_ERR(1, 1265, __pyx_L1_error) } /* "View.MemoryView":1261 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1268 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) { void *__pyx_v_tmpdata; size_t __pyx_v_itemsize; int __pyx_v_i; char __pyx_v_order; int __pyx_v_broadcasting; int __pyx_v_direct_copy; __Pyx_memviewslice __pyx_v_tmp; int __pyx_v_ndim; int __pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; void *__pyx_t_7; int __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":1276 * Check for overlapping memory and verify the shapes. 
* """ * cdef void *tmpdata = NULL # <<<<<<<<<<<<<< * cdef size_t itemsize = src.memview.view.itemsize * cdef int i */ __pyx_v_tmpdata = NULL; /* "View.MemoryView":1277 * """ * cdef void *tmpdata = NULL * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef int i * cdef char order = get_best_order(&src, src_ndim) */ __pyx_t_1 = __pyx_v_src.memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1279 * cdef size_t itemsize = src.memview.view.itemsize * cdef int i * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<< * cdef bint broadcasting = False * cdef bint direct_copy = False */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim); /* "View.MemoryView":1280 * cdef int i * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False # <<<<<<<<<<<<<< * cdef bint direct_copy = False * cdef __Pyx_memviewslice tmp */ __pyx_v_broadcasting = 0; /* "View.MemoryView":1281 * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False * cdef bint direct_copy = False # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice tmp * */ __pyx_v_direct_copy = 0; /* "View.MemoryView":1284 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1285 * * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<< * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim); /* "View.MemoryView":1284 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ goto __pyx_L3; } /* "View.MemoryView":1286 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1287 * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<< * * cdef int ndim = max(src_ndim, dst_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim); /* "View.MemoryView":1286 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ } __pyx_L3:; /* "View.MemoryView":1289 * broadcast_leading(&dst, dst_ndim, src_ndim) * * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_3 = __pyx_v_dst_ndim; __pyx_t_4 = __pyx_v_src_ndim; if (((__pyx_t_3 > __pyx_t_4) != 0)) { __pyx_t_5 = __pyx_t_3; } else { __pyx_t_5 = __pyx_t_4; } __pyx_v_ndim = __pyx_t_5; /* "View.MemoryView":1291 * cdef int ndim = max(src_ndim, dst_ndim) * * for i in range(ndim): # <<<<<<<<<<<<<< * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: */ __pyx_t_5 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_5; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1292 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) 
!= 0); if (__pyx_t_2) { /* "View.MemoryView":1293 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1294 * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: * broadcasting = True # <<<<<<<<<<<<<< * src.strides[i] = 0 * else: */ __pyx_v_broadcasting = 1; /* "View.MemoryView":1295 * if src.shape[i] == 1: * broadcasting = True * src.strides[i] = 0 # <<<<<<<<<<<<<< * else: * _err_extents(i, dst.shape[i], src.shape[i]) */ (__pyx_v_src.strides[__pyx_v_i]) = 0; /* "View.MemoryView":1293 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ goto __pyx_L7; } /* "View.MemoryView":1297 * src.strides[i] = 0 * else: * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<< * * if src.suboffsets[i] >= 0: */ /*else*/ { __pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1297, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":1292 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ } /* "View.MemoryView":1299 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":1300 * * if src.suboffsets[i] >= 0: * _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<< * * if slices_overlap(&src, &dst, ndim, itemsize): */ __pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1300, __pyx_L1_error) /* "View.MemoryView":1299 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ } } /* "View.MemoryView":1302 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ __pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0); if (__pyx_t_2) { /* "View.MemoryView":1304 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ __pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1305 * * if not slice_is_contig(src, order, ndim): * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<< * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim); /* "View.MemoryView":1304 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ } /* "View.MemoryView":1307 * order = get_best_order(&dst, ndim) * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<< * src = tmp * */ __pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) 
__PYX_ERR(1, 1307, __pyx_L1_error) __pyx_v_tmpdata = __pyx_t_7; /* "View.MemoryView":1308 * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) * src = tmp # <<<<<<<<<<<<<< * * if not broadcasting: */ __pyx_v_src = __pyx_v_tmp; /* "View.MemoryView":1302 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ } /* "View.MemoryView":1310 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1313 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1314 * * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<< * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim); /* "View.MemoryView":1313 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ goto __pyx_L12; } /* "View.MemoryView":1315 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1316 * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<< * * if direct_copy: */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim); /* "View.MemoryView":1315 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ } __pyx_L12:; /* "View.MemoryView":1318 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_2 = (__pyx_v_direct_copy != 0); if (__pyx_t_2) { /* "View.MemoryView":1320 * if direct_copy: * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1321 * * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) */ (void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim))); /* "View.MemoryView":1322 * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * free(tmpdata) * return 0 */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1323 * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, 
dtype_is_object, ndim, True) * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1324 * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * if order == 'F' == get_best_order(&dst, ndim): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1318 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ } /* "View.MemoryView":1310 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1326 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ __pyx_t_2 = (__pyx_v_order == 'F'); if (__pyx_t_2) { __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim)); } __pyx_t_8 = (__pyx_t_2 != 0); if (__pyx_t_8) { /* "View.MemoryView":1329 * * * transpose_memslice(&src) # <<<<<<<<<<<<<< * transpose_memslice(&dst) * */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1329, __pyx_L1_error) /* "View.MemoryView":1330 * * transpose_memslice(&src) * transpose_memslice(&dst) # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1330, __pyx_L1_error) /* "View.MemoryView":1326 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1332 * transpose_memslice(&dst) * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1333 * * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * */ copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1334 * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * free(tmpdata) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1336 * refcount_copying(&dst, dtype_is_object, ndim, True) * * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1337 * * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_broadcast_leading') */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1268 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1340 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ static void 
__pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) { int __pyx_v_i; int __pyx_v_offset; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1344 * int ndim_other) nogil: * cdef int i * cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim); /* "View.MemoryView":1346 * cdef int offset = ndim_other - ndim * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1347 * * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<< * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] */ (__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]); /* "View.MemoryView":1348 * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<< * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * */ (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1349 * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<< * * for i in range(offset): */ (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]); } /* "View.MemoryView":1351 * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * * for i in range(offset): # <<<<<<<<<<<<<< * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] */ __pyx_t_1 = __pyx_v_offset; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1352 * * for i in range(offset): * mslice.shape[i] = 1 # <<<<<<<<<<<<<< * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 */ (__pyx_v_mslice->shape[__pyx_v_i]) = 1; /* "View.MemoryView":1353 * for i in range(offset): * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<< * mslice.suboffsets[i] = -1 * */ (__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]); /* "View.MemoryView":1354 * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * */ (__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1340 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ /* function exit code */ } /* "View.MemoryView":1362 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) { int __pyx_t_1; /* "View.MemoryView":1366 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ __pyx_t_1 = (__pyx_v_dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":1367 * * if dtype_is_object: * refcount_objects_in_slice_with_gil(dst.data, dst.shape, # 
<<<<<<<<<<<<<< * dst.strides, ndim, inc) * */ __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1366 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ } /* "View.MemoryView":1362 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ /* function exit code */ } /* "View.MemoryView":1371 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { __Pyx_RefNannyDeclarations #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0); /* "View.MemoryView":1374 * Py_ssize_t *strides, int ndim, * bint inc) with gil: * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_refcount_objects_in_slice') */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1371 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ /* function exit code */ __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } /* "View.MemoryView":1377 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0); /* "View.MemoryView":1381 * cdef Py_ssize_t i * * for i in range(shape[0]): # <<<<<<<<<<<<<< * if ndim == 1: * if inc: */ __pyx_t_1 = (__pyx_v_shape[0]); __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1382 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ __pyx_t_4 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_4) { /* "View.MemoryView":1383 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ __pyx_t_4 = (__pyx_v_inc != 0); if (__pyx_t_4) { /* "View.MemoryView":1384 * if ndim == 1: * if inc: * Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * Py_DECREF((<PyObject **> data)[0]) */ Py_INCREF((((PyObject **)__pyx_v_data)[0])); /* "View.MemoryView":1383 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ goto __pyx_L6; } /* "View.MemoryView":1386 * 
Py_INCREF((<PyObject **> data)[0]) * else: * Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, */ /*else*/ { Py_DECREF((((PyObject **)__pyx_v_data)[0])); } __pyx_L6:; /* "View.MemoryView":1382 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ goto __pyx_L5; } /* "View.MemoryView":1388 * Py_DECREF((<PyObject **> data)[0]) * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, inc) * */ /*else*/ { /* "View.MemoryView":1389 * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, * ndim - 1, inc) # <<<<<<<<<<<<<< * * data += strides[0] */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc); } __pyx_L5:; /* "View.MemoryView":1391 * ndim - 1, inc) * * data += strides[0] # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); } /* "View.MemoryView":1377 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1397 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) { /* "View.MemoryView":1400 * size_t itemsize, void *item, * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1401 * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<< * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) */ __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1403 * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1397 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ /* function exit code */ } /* "View.MemoryView":1407 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_extent; int __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t 
__pyx_t_3; Py_ssize_t __pyx_t_4; /* "View.MemoryView":1411 * size_t itemsize, void *item) nogil: * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t extent = shape[0] * */ __pyx_v_stride = (__pyx_v_strides[0]); /* "View.MemoryView":1412 * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] * cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_extent = (__pyx_v_shape[0]); /* "View.MemoryView":1414 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1415 * * if ndim == 1: * for i in range(extent): # <<<<<<<<<<<<<< * memcpy(data, item, itemsize) * data += stride */ __pyx_t_2 = __pyx_v_extent; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1416 * if ndim == 1: * for i in range(extent): * memcpy(data, item, itemsize) # <<<<<<<<<<<<<< * data += stride * else: */ (void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize)); /* "View.MemoryView":1417 * for i in range(extent): * memcpy(data, item, itemsize) * data += stride # <<<<<<<<<<<<<< * else: * for i in range(extent): */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } /* "View.MemoryView":1414 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ goto __pyx_L3; } /* "View.MemoryView":1419 * data += stride * else: * for i in range(extent): # <<<<<<<<<<<<<< * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) */ /*else*/ { __pyx_t_2 = __pyx_v_extent; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1420 * else: * for i in range(extent): * _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, itemsize, item) * data += stride */ __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1422 * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) * data += stride # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } } __pyx_L3:; /* "View.MemoryView":1407 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ /* function exit code */ } /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v___pyx_type = 0; long __pyx_v___pyx_checksum; PyObject *__pyx_v___pyx_state = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; 
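/* [editorial annotation; not part of Cython's generated output] The wrapper
   below is Cython's standard argument-unpacking preamble: the first switch
   falls through (CYTHON_FALLTHROUGH) to collect however many positional
   arguments were passed, and the second switch fills the remaining slots of
   values[] from keyword arguments, raising via __Pyx_RaiseArgtupleInvalid
   when a required argument is missing or the arity is wrong. */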
__Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(1, 1, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v___pyx_type = values[0]; __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error) __pyx_v___pyx_state = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_v___pyx_PickleError = 0; PyObject *__pyx_v___pyx_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0); /* "(tree fragment)":4 * cdef object __pyx_PickleError * cdef object __pyx_result * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) */ __pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0); if (__pyx_t_1) { /* "(tree fragment)":5 * cdef object __pyx_result * if 
__pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<< * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) */ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_n_s_PickleError); __Pyx_GIVEREF(__pyx_n_s_PickleError); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError); __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_2); __pyx_v___pyx_PickleError = __pyx_t_2; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":6 * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<< * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: */ __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_INCREF(__pyx_v___pyx_PickleError); __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 6, __pyx_L1_error) /* "(tree fragment)":4 * cdef object __pyx_PickleError * cdef object __pyx_result * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) */ } /* "(tree fragment)":7 * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<< * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_4) ? 
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v___pyx_result = __pyx_t_3; __pyx_t_3 = 0; /* "(tree fragment)":8 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result */ __pyx_t_1 = (__pyx_v___pyx_state != Py_None); __pyx_t_6 = (__pyx_t_1 != 0); if (__pyx_t_6) { /* "(tree fragment)":9 * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) # <<<<<<<<<<<<<< * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error) __pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":8 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result */ } /* "(tree fragment)":10 * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result # <<<<<<<<<<<<<< * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v___pyx_result); __pyx_r = __pyx_v___pyx_result; goto __pyx_L0; /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v___pyx_PickleError); __Pyx_XDECREF(__pyx_v___pyx_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 
0; __Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0); /* "(tree fragment)":12 * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<< * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[1]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 12, __pyx_L1_error) } __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __Pyx_GOTREF(__pyx_v___pyx_result->name); __Pyx_DECREF(__pyx_v___pyx_result->name); __pyx_v___pyx_result->name = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":13 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[1]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(1, 13, __pyx_L1_error) } __pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error) __pyx_t_4 = ((__pyx_t_3 > 1) != 0); if (__pyx_t_4) { } else { __pyx_t_2 = __pyx_t_4; goto __pyx_L4_bool_binop_done; } __pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error) __pyx_t_5 = (__pyx_t_4 != 0); __pyx_t_2 = __pyx_t_5; __pyx_L4_bool_binop_done:; if (__pyx_t_2) { /* "(tree fragment)":14 * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<< */ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 14, __pyx_L1_error) } __pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_8)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_8); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); } } __pyx_t_1 = (__pyx_t_8) ? 
__Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":13 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[1]) */ } /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static struct __pyx_vtabstruct_array __pyx_vtable_array; static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_array_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_array_obj *)o); p->__pyx_vtab = __pyx_vtabptr_array; p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None); p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None); if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_array(PyObject *o) { struct __pyx_array_obj *p = (struct __pyx_array_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); __pyx_array___dealloc__(o); __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->mode); Py_CLEAR(p->_format); (*Py_TYPE(o)->tp_free)(o); } static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_array___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) { PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n); if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); v = __pyx_array___getattr__(o, n); } return v; } static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o); } static PyMethodDef __pyx_methods_array[] = { 
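/* [editorial annotation; not part of Cython's generated output] Method table
   for the generated array helper type: each PyMethodDef entry binds a
   Python-visible name to its C implementation, and the all-zero entry
   terminates the table, as CPython requires. */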
{"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_array[] = { {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_array = { __pyx_array___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_array, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_array = { __pyx_array___len__, /*mp_length*/ __pyx_array___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_array = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_array_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_array = { PyVarObject_HEAD_INIT(0, 0) "glove.metrics.accuracy_cython.array", /*tp_name*/ sizeof(struct __pyx_array_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_array, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_array, /*tp_as_sequence*/ &__pyx_tp_as_mapping_array, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ __pyx_tp_getattro_array, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_array, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_array, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_array, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_array, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { struct __pyx_MemviewEnum_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_MemviewEnum_obj *)o); p->name = Py_None; Py_INCREF(Py_None); return o; } static void __pyx_tp_dealloc_Enum(PyObject *o) { struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && 
!_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); Py_CLEAR(p->name); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) { int e; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; if (p->name) { e = (*v)(p->name, a); if (e) return e; } return 0; } static int __pyx_tp_clear_Enum(PyObject *o) { PyObject* tmp; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; tmp = ((PyObject*)p->name); p->name = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } static PyMethodDef __pyx_methods_Enum[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_MemviewEnum = { PyVarObject_HEAD_INIT(0, 0) "glove.metrics.accuracy_cython.Enum", /*tp_name*/ sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_Enum, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_MemviewEnum___repr__, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_Enum, /*tp_traverse*/ __pyx_tp_clear_Enum, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_Enum, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ __pyx_MemviewEnum___init__, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_Enum, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryview_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_memoryview_obj *)o); p->__pyx_vtab = __pyx_vtabptr_memoryview; p->obj = Py_None; Py_INCREF(Py_None); p->_size = Py_None; Py_INCREF(Py_None); p->_array_interface = Py_None; Py_INCREF(Py_None); p->view.obj = NULL; if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_memoryview(PyObject *o) { struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } 
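/* [editorial annotation; not part of Cython's generated output] The dealloc
   path below untracks the object from the GC, saves any pending exception
   with PyErr_Fetch, and temporarily bumps the refcount around the user-level
   __dealloc__ call so that code running inside it cannot retrigger
   deallocation, before restoring the exception state with PyErr_Restore. */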
#endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); __pyx_memoryview___dealloc__(o); __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->obj); Py_CLEAR(p->_size); Py_CLEAR(p->_array_interface); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; if (p->obj) { e = (*v)(p->obj, a); if (e) return e; } if (p->_size) { e = (*v)(p->_size, a); if (e) return e; } if (p->_array_interface) { e = (*v)(p->_array_interface, a); if (e) return e; } if (p->view.obj) { e = (*v)(p->view.obj, a); if (e) return e; } return 0; } static int __pyx_tp_clear_memoryview(PyObject *o) { PyObject* tmp; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; tmp = ((PyObject*)p->obj); p->obj = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_size); p->_size = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_array_interface); p->_array_interface = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); Py_CLEAR(p->view.obj); return 0; } static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_memoryview___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o); } static PyMethodDef __pyx_methods_memoryview[] = { {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0}, {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0}, {"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0}, {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0}, {"__reduce_cython__", 
(PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_memoryview[] = { {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0}, {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0}, {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0}, {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0}, {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0}, {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0}, {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0}, {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0}, {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_memoryview = { __pyx_memoryview___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_memoryview, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_memoryview = { __pyx_memoryview___len__, /*mp_length*/ __pyx_memoryview___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_memoryview = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_memoryview_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_memoryview = { PyVarObject_HEAD_INIT(0, 0) "glove.metrics.accuracy_cython.memoryview", /*tp_name*/ sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_memoryview, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_memoryview___repr__, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/ &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ __pyx_memoryview___str__, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_memoryview, /*tp_traverse*/ __pyx_tp_clear_memoryview, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_memoryview, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_memoryview, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_memoryview, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, 
/*tp_print*/ #endif }; static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice; static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryviewslice_obj *p; PyObject *o = __pyx_tp_new_memoryview(t, a, k); if (unlikely(!o)) return 0; p = ((struct __pyx_memoryviewslice_obj *)o); p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice; p->from_object = Py_None; Py_INCREF(Py_None); p->from_slice.memview = NULL; return o; } static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) { struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); __pyx_memoryviewslice___dealloc__(o); __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->from_object); PyObject_GC_Track(o); __pyx_tp_dealloc_memoryview(o); } static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e; if (p->from_object) { e = (*v)(p->from_object, a); if (e) return e; } return 0; } static int __pyx_tp_clear__memoryviewslice(PyObject *o) { PyObject* tmp; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; __pyx_tp_clear_memoryview(o); tmp = ((PyObject*)p->from_object); p->from_object = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); __PYX_XDEC_MEMVIEW(&p->from_slice, 1); return 0; } static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o); } static PyMethodDef __pyx_methods__memoryviewslice[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = { {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_memoryviewslice = { PyVarObject_HEAD_INIT(0, 0) "glove.metrics.accuracy_cython._memoryviewslice", /*tp_name*/ sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___repr__, /*tp_repr*/ #else 0, /*tp_repr*/ #endif 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___str__, /*tp_str*/ #else 0, /*tp_str*/ #endif 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ "Internal class for passing memoryview slices to Python", /*tp_doc*/ 
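/* [editorial annotation; not part of Cython's generated output]
   _memoryviewslice extends the memoryview type above; its extra from_object
   field keeps the originating Python object alive, which is why it shows up
   in the tp_traverse/tp_clear slots that follow. */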
__pyx_tp_traverse__memoryviewslice, /*tp_traverse*/ __pyx_tp_clear__memoryviewslice, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods__memoryviewslice, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets__memoryviewslice, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new__memoryviewslice, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 #if CYTHON_PEP489_MULTI_PHASE_INIT static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ static int __pyx_pymod_exec_accuracy_cython(PyObject* module); /*proto*/ static PyModuleDef_Slot __pyx_moduledef_slots[] = { {Py_mod_create, (void*)__pyx_pymod_create}, {Py_mod_exec, (void*)__pyx_pymod_exec_accuracy_cython}, {0, NULL} }; #endif static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, "accuracy_cython", 0, /* m_doc */ #if CYTHON_PEP489_MULTI_PHASE_INIT 0, /* m_size */ #else -1, /* m_size */ #endif __pyx_methods /* m_methods */, #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_moduledef_slots, /* m_slots */ #else NULL, /* m_reload */ #endif NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif #ifndef CYTHON_SMALL_CODE #if defined(__clang__) #define CYTHON_SMALL_CODE #elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) #define CYTHON_SMALL_CODE __attribute__((cold)) #else #define CYTHON_SMALL_CODE #endif #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1}, {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0}, {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0}, {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1}, {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0}, {&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0}, {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0}, 
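/* String-table entry layout in this Cython version: {target slot, C literal,
   byte length, encoding, is_unicode, is_str, intern}. __Pyx_InitStrings()
   (called from __Pyx_InitGlobals) turns every entry into a live Python string
   at import time; __pyx_n_* entries are interned identifiers and __pyx_kp_*
   entries are plain constants such as error messages. */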
{&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0}, {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0}, {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0}, {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1}, {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0}, {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1}, {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1}, {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1}, {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1}, {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, {&__pyx_n_s_compute_rank_violations, __pyx_k_compute_rank_violations, sizeof(__pyx_k_compute_rank_violations), 0, 0, 1, 1}, {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0}, {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1}, {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1}, {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, {&__pyx_n_s_expected, __pyx_k_expected, sizeof(__pyx_k_expected), 0, 0, 1, 1}, {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1}, {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1}, {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1}, {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, {&__pyx_n_s_glove_metrics_accuracy_cython, __pyx_k_glove_metrics_accuracy_cython, sizeof(__pyx_k_glove_metrics_accuracy_cython), 0, 0, 1, 1}, {&__pyx_kp_s_glove_metrics_accuracy_cython_py, __pyx_k_glove_metrics_accuracy_cython_py, sizeof(__pyx_k_glove_metrics_accuracy_cython_py), 0, 0, 1, 0}, {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0}, {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_input, __pyx_k_input, sizeof(__pyx_k_input), 
0, 0, 1, 1}, {&__pyx_n_s_inputs, __pyx_k_inputs, sizeof(__pyx_k_inputs), 0, 0, 1, 1}, {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1}, {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0}, {&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1}, {&__pyx_n_s_k, __pyx_k_k, sizeof(__pyx_k_k), 0, 0, 1, 1}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1}, {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1}, {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1}, {&__pyx_n_s_no_components, __pyx_k_no_components, sizeof(__pyx_k_no_components), 0, 0, 1, 1}, {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, {&__pyx_n_s_no_input_vectors, __pyx_k_no_input_vectors, sizeof(__pyx_k_no_input_vectors), 0, 0, 1, 1}, {&__pyx_n_s_no_threads, __pyx_k_no_threads, sizeof(__pyx_k_no_threads), 0, 0, 1, 1}, {&__pyx_n_s_no_wordvec, __pyx_k_no_wordvec, sizeof(__pyx_k_no_wordvec), 0, 0, 1, 1}, {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1}, {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1}, {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1}, {&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1}, {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1}, {&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_rank_violations, __pyx_k_rank_violations, sizeof(__pyx_k_rank_violations), 0, 0, 1, 1}, {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, {&__pyx_n_s_score, __pyx_k_score, sizeof(__pyx_k_score), 0, 0, 1, 1}, {&__pyx_n_s_score_of_expected, __pyx_k_score_of_expected, sizeof(__pyx_k_score_of_expected), 0, 0, 1, 1}, {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, {&__pyx_n_s_skip_word, __pyx_k_skip_word, sizeof(__pyx_k_skip_word), 0, 0, 1, 1}, {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1}, {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1}, 
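/* The interned names above belong to compute_rank_violations() and its
   arguments/locals (wordvec, wordvec_norm, input, expected, no_threads, ...)
   plus the pickle hooks (__reduce_cython__, __pyx_unpickle_Enum) generated
   for the memoryview Enum helper type. */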
{&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0}, {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0}, {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0}, {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1}, {&__pyx_n_s_violations, __pyx_k_violations, sizeof(__pyx_k_violations), 0, 0, 1, 1}, {&__pyx_n_s_wordvec, __pyx_k_wordvec, sizeof(__pyx_k_wordvec), 0, 0, 1, 1}, {&__pyx_n_s_wordvec_norm, __pyx_k_wordvec_norm, sizeof(__pyx_k_wordvec_norm), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 14, __pyx_L1_error) __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 133, __pyx_L1_error) __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 148, __pyx_L1_error) __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 151, __pyx_L1_error) __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error) __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 404, __pyx_L1_error) __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 613, __pyx_L1_error) __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 832, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "View.MemoryView":133 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple_)) __PYX_ERR(1, 133, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); /* "View.MemoryView":136 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "View.MemoryView":148 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if 
(unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "View.MemoryView":176 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* "View.MemoryView":192 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); /* "View.MemoryView":418 * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< * * have_slices, index = _unellipsify(index, self.view.ndim) */ __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 418, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); /* "View.MemoryView":495 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 495, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); /* "View.MemoryView":520 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< * * if flags & PyBUF_ND: */ __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 520, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); /* "View.MemoryView":570 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__11)) 
__PYX_ERR(1, 570, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); /* "View.MemoryView":577 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __pyx_tuple__12 = PyTuple_New(1); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__12); __Pyx_INCREF(__pyx_int_neg_1); __Pyx_GIVEREF(__pyx_int_neg_1); PyTuple_SET_ITEM(__pyx_tuple__12, 0, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_tuple__12); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__13); __Pyx_GIVEREF(__pyx_tuple__13); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__14); __Pyx_GIVEREF(__pyx_tuple__14); /* "View.MemoryView":682 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_slice__15 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__15)) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__15); __Pyx_GIVEREF(__pyx_slice__15); /* "View.MemoryView":703 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(1, 703, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__16); __Pyx_GIVEREF(__pyx_tuple__16); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__17); __Pyx_GIVEREF(__pyx_tuple__17); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__18); __Pyx_GIVEREF(__pyx_tuple__18); /* "glove/metrics/accuracy_cython.pyx":20 * * * def compute_rank_violations(double[:, ::1] wordvec, # <<<<<<<<<<<<<< * double[::1] wordvec_norm, * double[:, ::1] input, */ __pyx_tuple__19 = PyTuple_Pack(17, __pyx_n_s_wordvec, __pyx_n_s_wordvec_norm, __pyx_n_s_input, __pyx_n_s_expected, __pyx_n_s_inputs, __pyx_n_s_rank_violations, __pyx_n_s_no_threads, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_k, 
__pyx_n_s_no_input_vectors, __pyx_n_s_no_wordvec, __pyx_n_s_skip_word, __pyx_n_s_no_components, __pyx_n_s_violations, __pyx_n_s_score_of_expected, __pyx_n_s_score); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(0, 20, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__19); __Pyx_GIVEREF(__pyx_tuple__19); __pyx_codeobj__20 = (PyObject*)__Pyx_PyCode_New(7, 0, 17, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__19, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_glove_metrics_accuracy_cython_py, __pyx_n_s_compute_rank_violations, 20, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__20)) __PYX_ERR(0, 20, __pyx_L1_error) /* "View.MemoryView":286 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_tuple__21 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(1, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__21); __Pyx_GIVEREF(__pyx_tuple__21); /* "View.MemoryView":287 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_tuple__22 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(1, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__22); __Pyx_GIVEREF(__pyx_tuple__22); /* "View.MemoryView":288 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__23 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(1, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__23); __Pyx_GIVEREF(__pyx_tuple__23); /* "View.MemoryView":291 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_tuple__24 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(1, 291, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__24); __Pyx_GIVEREF(__pyx_tuple__24); /* "View.MemoryView":292 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__25 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(1, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__25); __Pyx_GIVEREF(__pyx_tuple__25); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ __pyx_tuple__26 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__26)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__26); __Pyx_GIVEREF(__pyx_tuple__26); __pyx_codeobj__27 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__26, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__27)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static 
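/* __Pyx_InitGlobals(): one-time setup - threads (PyEval_InitThreads under
   WITH_THREAD), string-table realization, and the small integer constants
   0, 1, -1 and 184977713 (0xb068931, the checksum that
   __pyx_unpickle_Enum validates against). */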
CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { /* InitThreads.init */ #ifdef WITH_THREAD PyEval_InitThreads(); #endif if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ static int __Pyx_modinit_global_init_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); /*--- Global init code ---*/ generic = Py_None; Py_INCREF(Py_None); strided = Py_None; Py_INCREF(Py_None); indirect = Py_None; Py_INCREF(Py_None); contiguous = Py_None; Py_INCREF(Py_None); indirect_contiguous = Py_None; Py_INCREF(Py_None); __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_variable_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); /*--- Variable export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); /*--- Function export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_type_init_code(void) { __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); /*--- Type init code ---*/ __pyx_vtabptr_array = &__pyx_vtable_array; __pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview; if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_array.tp_print = 0; #endif if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) __pyx_array_type = &__pyx_type___pyx_array; if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_MemviewEnum.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error) __pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum; __pyx_vtabptr_memoryview = 
&__pyx_vtable_memoryview; __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer; __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice; __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment; __pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar; __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed; __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object; __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object; if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_memoryview.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) __pyx_memoryview_type = &__pyx_type___pyx_memoryview; __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice; __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview; __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object; __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object; __pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type; if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_memoryviewslice.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice; __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_modinit_type_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); /*--- Type import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_variable_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); 
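/* This import helper and the export helpers above are deliberately empty:
   accuracy_cython neither exports nor imports C-level symbols. The real work
   happened in __Pyx_modinit_type_init_code(), which wired the vtables and ran
   PyType_Ready() for array, Enum, memoryview and _memoryviewslice. */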
/*--- Variable import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); /*--- Function import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } #ifndef CYTHON_NO_PYINIT_EXPORT #define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC #elif PY_MAJOR_VERSION < 3 #ifdef __cplusplus #define __Pyx_PyMODINIT_FUNC extern "C" void #else #define __Pyx_PyMODINIT_FUNC void #endif #else #ifdef __cplusplus #define __Pyx_PyMODINIT_FUNC extern "C" PyObject * #else #define __Pyx_PyMODINIT_FUNC PyObject * #endif #endif #if PY_MAJOR_VERSION < 3 __Pyx_PyMODINIT_FUNC initaccuracy_cython(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC initaccuracy_cython(void) #else __Pyx_PyMODINIT_FUNC PyInit_accuracy_cython(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC PyInit_accuracy_cython(void) #if CYTHON_PEP489_MULTI_PHASE_INIT { return PyModuleDef_Init(&__pyx_moduledef); } static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { #if PY_VERSION_HEX >= 0x030700A1 static PY_INT64_T main_interpreter_id = -1; PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); if (main_interpreter_id == -1) { main_interpreter_id = current_id; return (unlikely(current_id == -1)) ? -1 : 0; } else if (unlikely(main_interpreter_id != current_id)) #else static PyInterpreterState *main_interpreter = NULL; PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; if (!main_interpreter) { main_interpreter = current_interpreter; } else if (unlikely(main_interpreter != current_interpreter)) #endif { PyErr_SetString( PyExc_ImportError, "Interpreter change detected - this module can only be loaded into one interpreter per process."); return -1; } return 0; } static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { PyObject *value = PyObject_GetAttrString(spec, from_name); int result = 0; if (likely(value)) { if (allow_none || value != Py_None) { result = PyDict_SetItemString(moddict, to_name, value); } Py_DECREF(value); } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); } else { result = -1; } return result; } static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { PyObject *module = NULL, *moddict, *modname; if (__Pyx_check_single_interpreter()) return NULL; if (__pyx_m) return __Pyx_NewRef(__pyx_m); modname = PyObject_GetAttrString(spec, "name"); if (unlikely(!modname)) goto bad; module = PyModule_NewObject(modname); Py_DECREF(modname); if (unlikely(!module)) goto bad; moddict = PyModule_GetDict(module); if (unlikely(!moddict)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; return module; bad: Py_XDECREF(module); return NULL; } static CYTHON_SMALL_CODE int __pyx_pymod_exec_accuracy_cython(PyObject *__pyx_pyinit_module) #endif #endif { PyObject *__pyx_t_1 = NULL; static PyThread_type_lock __pyx_t_2[8]; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations #if 
CYTHON_PEP489_MULTI_PHASE_INIT if (__pyx_m) { if (__pyx_m == __pyx_pyinit_module) return 0; PyErr_SetString(PyExc_RuntimeError, "Module 'accuracy_cython' has already been imported. Re-initialisation is not supported."); return -1; } #elif PY_MAJOR_VERSION >= 3 if (__pyx_m) return __Pyx_NewRef(__pyx_m); #endif #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_accuracy_cython(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pxy_PyFrame_Initialize_Offsets __Pxy_PyFrame_Initialize_Offsets(); #endif __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_AsyncGen_USED if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_m = __pyx_pyinit_module; Py_INCREF(__pyx_m); #else #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("accuracy_cython", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) #endif __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_b); __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_cython_runtime); if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. 
---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_glove__metrics__accuracy_cython) { if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "glove.metrics.accuracy_cython")) { if (unlikely(PyDict_SetItemString(modules, "glove.metrics.accuracy_cython", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Global type/function init code ---*/ (void)__Pyx_modinit_global_init_code(); (void)__Pyx_modinit_variable_export_code(); (void)__Pyx_modinit_function_export_code(); if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error) (void)__Pyx_modinit_type_import_code(); (void)__Pyx_modinit_variable_import_code(); (void)__Pyx_modinit_function_import_code(); /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "glove/metrics/accuracy_cython.pyx":20 * * * def compute_rank_violations(double[:, ::1] wordvec, # <<<<<<<<<<<<<< * double[::1] wordvec_norm, * double[:, ::1] input, */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5glove_7metrics_15accuracy_cython_1compute_rank_violations, NULL, __pyx_n_s_glove_metrics_accuracy_cython); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_compute_rank_violations, __pyx_t_1) < 0) __PYX_ERR(0, 20, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "glove/metrics/accuracy_cython.pyx":1 * #!python # <<<<<<<<<<<<<< * #cython: boundscheck=False, wraparound=False, cdivision=True, initializedcheck=False * */ __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":209 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * def __dealloc__(array self): */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 209, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 209, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_array_type); /* "View.MemoryView":286 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__21, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(generic); __Pyx_DECREF_SET(generic, __pyx_t_1); 
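/* generic is the first of five Enum sentinels (generic, strided, indirect,
   contiguous, indirect_contiguous) built here; the memoryview slicing and
   copy helpers compare against these singletons to describe how each
   dimension is laid out and accessed. */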
__Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":287 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__22, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(strided); __Pyx_DECREF_SET(strided, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":288 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__23, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(indirect); __Pyx_DECREF_SET(indirect, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":291 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__24, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 291, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(contiguous); __Pyx_DECREF_SET(contiguous, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":292 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__25, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(indirect_contiguous); __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":316 * * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<< * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ * PyThread_allocate_lock(), */ __pyx_memoryview_thread_locks_used = 0; /* "View.MemoryView":317 * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<< * PyThread_allocate_lock(), * PyThread_allocate_lock(), */ __pyx_t_2[0] = PyThread_allocate_lock(); __pyx_t_2[1] = PyThread_allocate_lock(); __pyx_t_2[2] = PyThread_allocate_lock(); __pyx_t_2[3] = PyThread_allocate_lock(); __pyx_t_2[4] = PyThread_allocate_lock(); __pyx_t_2[5] = PyThread_allocate_lock(); __pyx_t_2[6] = PyThread_allocate_lock(); __pyx_t_2[7] = PyThread_allocate_lock(); memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_2, sizeof(__pyx_memoryview_thread_locks[0]) * (8)); /* "View.MemoryView":549 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 549, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 549, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_memoryview_type); /* 
"View.MemoryView":995 * return self.from_object * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 995, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 995, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_memoryviewslice_type); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init glove.metrics.accuracy_cython", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_CLEAR(__pyx_m); } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init glove.metrics.accuracy_cython"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if CYTHON_PEP489_MULTI_PHASE_INIT return (__pyx_m != NULL) ? 
0 : -1; #elif PY_MAJOR_VERSION >= 3 return __pyx_m; #else return; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule(modname); if (!m) goto end; p = PyObject_GetAttrString(m, "RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* PyObjectGetAttrStr */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? "" : "s", num_found); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 
1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* MemviewSliceInit */ static int __Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference) { __Pyx_RefNannyDeclarations int i, retval=-1; Py_buffer *buf = &memview->view; __Pyx_RefNannySetupContext("init_memviewslice", 0); if (unlikely(memviewslice->memview || memviewslice->data)) { PyErr_SetString(PyExc_ValueError, "memviewslice is already initialized!"); goto fail; } if (buf->strides) { for (i = 0; i < ndim; i++) { memviewslice->strides[i] = buf->strides[i]; } } else { Py_ssize_t stride = buf->itemsize; for (i = ndim - 1; i >= 0; i--) { memviewslice->strides[i] = stride; stride *= buf->shape[i]; } } for (i = 0; i < ndim; i++) { memviewslice->shape[i] = buf->shape[i]; if (buf->suboffsets) { memviewslice->suboffsets[i] = buf->suboffsets[i]; } else { memviewslice->suboffsets[i] = -1; } } memviewslice->memview = memview; memviewslice->data = (char *)buf->buf; if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { Py_INCREF(memview); } retval = 0; goto no_fail; fail: memviewslice->memview = 0; memviewslice->data = 0; retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } #ifndef Py_NO_RETURN #define Py_NO_RETURN #endif static void __pyx_fatalerror(const char *fmt, ...) 
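/* __pyx_fatalerror() formats a message and aborts via Py_FatalError(); the
   __Pyx_INC_MEMVIEW/__Pyx_XDEC_MEMVIEW helpers below call it when a slice
   acquisition count is found negative, i.e. when refcount bookkeeping is
   corrupt. */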
Py_NO_RETURN { va_list vargs; char msg[200]; #ifdef HAVE_STDARG_PROTOTYPES va_start(vargs, fmt); #else va_start(vargs); #endif vsnprintf(msg, 200, fmt, vargs); va_end(vargs); Py_FatalError(msg); } static CYTHON_INLINE int __pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)++; PyThread_release_lock(lock); return result; } static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)--; PyThread_release_lock(lock); return result; } static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int first_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (unlikely(!memview || (PyObject *) memview == Py_None)) return; if (unlikely(__pyx_get_slice_count(memview) < 0)) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); first_time = __pyx_add_acquisition_count(memview) == 0; if (unlikely(first_time)) { if (have_gil) { Py_INCREF((PyObject *) memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_INCREF((PyObject *) memview); PyGILState_Release(_gilstate); } } } static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int last_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (unlikely(!memview || (PyObject *) memview == Py_None)) { memslice->memview = NULL; return; } if (unlikely(__pyx_get_slice_count(memview) <= 0)) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); last_time = __pyx_sub_acquisition_count(memview) == 1; memslice->data = NULL; if (unlikely(last_time)) { if (have_gil) { Py_CLEAR(memslice->memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_CLEAR(memslice->memview); PyGILState_Release(_gilstate); } } else { memslice->memview = NULL; } } /* ArgTypeTest */ static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } else if (exact) { #if PY_MAJOR_VERSION == 2 if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(__Pyx_TypeCheck(obj, type))) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyErrFetchRestore */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; 
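/* Install the incoming exception triple before dropping the old one, so the
   thread state never points at freed objects even if a Py_XDECREF below
   triggers arbitrary destructor code. */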
tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto 
bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* PyCFunctionFastCall */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { PyCFunctionObject *func = (PyCFunctionObject*)func_obj; PyCFunction meth = PyCFunction_GET_FUNCTION(func); PyObject *self = PyCFunction_GET_SELF(func); int flags = PyCFunction_GET_FLAGS(func); assert(PyCFunction_Check(func)); assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); assert(nargs >= 0); assert(nargs == 0 || args != NULL); /* _PyCFunction_FastCallDict() must not be called with an exception set, because it may clear it (directly or indirectly) and so the caller loses its exception */ assert(!PyErr_Occurred()); if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); } else { return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); } } #endif /* PyFunctionFastCall */ #if CYTHON_FAST_PYCALL static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, PyObject *globals) { PyFrameObject *f; PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject **fastlocals; Py_ssize_t i; PyObject *result; assert(globals != NULL); /* XXX Perhaps we should create a specialized PyFrame_New() that doesn't take locals, but does take builtins without sanity checking them. */ assert(tstate != NULL); f = PyFrame_New(tstate, co, globals, NULL); if (f == NULL) { return NULL; } fastlocals = __Pyx_PyFrame_GetLocalsplus(f); for (i = 0; i < na; i++) { Py_INCREF(*args); fastlocals[i] = *args++; } result = PyEval_EvalFrameEx(f,0); ++tstate->recursion_depth; Py_DECREF(f); --tstate->recursion_depth; return result; } #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); PyObject *globals = PyFunction_GET_GLOBALS(func); PyObject *argdefs = PyFunction_GET_DEFAULTS(func); PyObject *closure; #if PY_MAJOR_VERSION >= 3 PyObject *kwdefs; #endif PyObject *kwtuple, **k; PyObject **d; Py_ssize_t nd; Py_ssize_t nk; PyObject *result; assert(kwargs == NULL || PyDict_Check(kwargs)); nk = kwargs ? 
PyDict_Size(kwargs) : 0;
    if (Py_EnterRecursiveCall((char*)" while calling a Python object")) {
        return NULL;
    }
    if (
#if PY_MAJOR_VERSION >= 3
            co->co_kwonlyargcount == 0 &&
#endif
            likely(kwargs == NULL || nk == 0) &&
            co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) {
        if (argdefs == NULL && co->co_argcount == nargs) {
            result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals);
            goto done;
        } else if (nargs == 0 && argdefs != NULL && co->co_argcount == Py_SIZE(argdefs)) {
            /* function called with no arguments, but all parameters have
               a default value: use default values as arguments. */
            args = &PyTuple_GET_ITEM(argdefs, 0);
            result = __Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals);
            goto done;
        }
    }
    if (kwargs != NULL) {
        Py_ssize_t pos, i;
        kwtuple = PyTuple_New(2 * nk);
        if (kwtuple == NULL) {
            result = NULL;
            goto done;
        }
        k = &PyTuple_GET_ITEM(kwtuple, 0);
        pos = i = 0;
        while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) {
            Py_INCREF(k[i]);
            Py_INCREF(k[i+1]);
            i += 2;
        }
        nk = i / 2;
    } else {
        kwtuple = NULL;
        k = NULL;
    }
    closure = PyFunction_GET_CLOSURE(func);
#if PY_MAJOR_VERSION >= 3
    kwdefs = PyFunction_GET_KW_DEFAULTS(func);
#endif
    if (argdefs != NULL) {
        d = &PyTuple_GET_ITEM(argdefs, 0);
        nd = Py_SIZE(argdefs);
    } else {
        d = NULL;
        nd = 0;
    }
#if PY_MAJOR_VERSION >= 3
    result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL,
                               args, (int)nargs,
                               k, (int)nk,
                               d, (int)nd, kwdefs, closure);
#else
    result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL,
                               args, (int)nargs,
                               k, (int)nk,
                               d, (int)nd, closure);
#endif
    Py_XDECREF(kwtuple);
done:
    Py_LeaveRecursiveCall();
    return result;
}
#endif
#endif
/* PyObjectCall2Args */
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) {
    PyObject *args, *result = NULL;
#if CYTHON_FAST_PYCALL
    if (PyFunction_Check(function)) {
        PyObject *args[2] = {arg1, arg2};
        return __Pyx_PyFunction_FastCall(function, args, 2);
    }
#endif
#if CYTHON_FAST_PYCCALL
    if (__Pyx_PyFastCFunction_Check(function)) {
        PyObject *args[2] = {arg1, arg2};
        return __Pyx_PyCFunction_FastCall(function, args, 2);
    }
#endif
    args = PyTuple_New(2);
    if (unlikely(!args)) goto done;
    Py_INCREF(arg1);
    PyTuple_SET_ITEM(args, 0, arg1);
    Py_INCREF(arg2);
    PyTuple_SET_ITEM(args, 1, arg2);
    Py_INCREF(function);
    result = __Pyx_PyObject_Call(function, args, NULL);
    Py_DECREF(args);
    Py_DECREF(function);
done:
    return result;
}
/* PyObjectCallMethO */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
    PyObject *self, *result;
    PyCFunction cfunc;
    cfunc = PyCFunction_GET_FUNCTION(func);
    self = PyCFunction_GET_SELF(func);
    if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
        return NULL;
    result = cfunc(self, arg);
    Py_LeaveRecursiveCall();
    if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
        PyErr_SetString(
            PyExc_SystemError,
            "NULL result without error in PyObject_Call");
    }
    return result;
}
#endif
/* PyObjectCallOneArg */
#if CYTHON_COMPILING_IN_CPYTHON
static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
    PyObject *result;
    PyObject *args = PyTuple_New(1);
    if (unlikely(!args)) return NULL;
    Py_INCREF(arg);
    PyTuple_SET_ITEM(args, 0, arg);
    result = __Pyx_PyObject_Call(func, args, NULL);
    Py_DECREF(args);
    return result;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
#if CYTHON_FAST_PYCALL
    if (PyFunction_Check(func)) {
        return __Pyx_PyFunction_FastCall(func, &arg, 1);
    }
#endif
    if
(likely(PyCFunction_Check(func))) { if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); #if CYTHON_FAST_PYCCALL } else if (__Pyx_PyFastCFunction_Check(func)) { return __Pyx_PyCFunction_FastCall(func, &arg, 1); #endif } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* BytesEquals */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else if (s1 == s2) { return (equals == Py_EQ); } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { const char *ps1, *ps2; Py_ssize_t length = PyBytes_GET_SIZE(s1); if (length != PyBytes_GET_SIZE(s2)) return (equals == Py_NE); ps1 = PyBytes_AS_STRING(s1); ps2 = PyBytes_AS_STRING(s2); if (ps1[0] != ps2[0]) { return (equals == Py_NE); } else if (length == 1) { return (equals == Py_EQ); } else { int result; #if CYTHON_USE_UNICODE_INTERNALS Py_hash_t hash1, hash2; hash1 = ((PyBytesObject*)s1)->ob_shash; hash2 = ((PyBytesObject*)s2)->ob_shash; if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { return (equals == Py_NE); } #endif result = memcmp(ps1, ps2, (size_t)length); return (equals == Py_EQ) ? (result == 0) : (result != 0); } } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { return (equals == Py_NE); } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { return (equals == Py_NE); } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } #endif } /* UnicodeEquals */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else #if PY_MAJOR_VERSION < 3 PyObject* owned_ref = NULL; #endif int s1_is_unicode, s2_is_unicode; if (s1 == s2) { goto return_eq; } s1_is_unicode = PyUnicode_CheckExact(s1); s2_is_unicode = PyUnicode_CheckExact(s2); #if PY_MAJOR_VERSION < 3 if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { owned_ref = PyUnicode_FromObject(s2); if (unlikely(!owned_ref)) return -1; s2 = owned_ref; s2_is_unicode = 1; } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { owned_ref = PyUnicode_FromObject(s1); if (unlikely(!owned_ref)) return -1; s1 = owned_ref; s1_is_unicode = 1; } else if (((!s2_is_unicode) & (!s1_is_unicode))) { return __Pyx_PyBytes_Equals(s1, s2, equals); } #endif if (s1_is_unicode & s2_is_unicode) { Py_ssize_t length; int kind; void *data1, *data2; if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) return -1; length = __Pyx_PyUnicode_GET_LENGTH(s1); if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { goto return_ne; } #if CYTHON_USE_UNICODE_INTERNALS { Py_hash_t hash1, hash2; #if CYTHON_PEP393_ENABLED hash1 = ((PyASCIIObject*)s1)->hash; hash2 = ((PyASCIIObject*)s2)->hash; #else hash1 = ((PyUnicodeObject*)s1)->hash; hash2 = ((PyUnicodeObject*)s2)->hash; #endif if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { goto return_ne; } } #endif kind = __Pyx_PyUnicode_KIND(s1); if (kind != __Pyx_PyUnicode_KIND(s2)) { goto return_ne; } data1 = __Pyx_PyUnicode_DATA(s1); data2 = 
__Pyx_PyUnicode_DATA(s2); if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { goto return_ne; } else if (length == 1) { goto return_eq; } else { int result = memcmp(data1, data2, (size_t)(length * kind)); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ) ? (result == 0) : (result != 0); } } else if ((s1 == Py_None) & s2_is_unicode) { goto return_ne; } else if ((s2 == Py_None) & s1_is_unicode) { goto return_ne; } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } return_eq: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ); return_ne: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_NE); #endif } /* GetAttr */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { #if CYTHON_USE_TYPE_SLOTS #if PY_MAJOR_VERSION >= 3 if (likely(PyUnicode_Check(n))) #else if (likely(PyString_Check(n))) #endif return __Pyx_PyObject_GetAttrStr(o, n); #endif return PyObject_GetAttr(o, n); } /* GetItemInt */ static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyList_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyTuple_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
i : i + PyTuple_GET_SIZE(o); if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return NULL; PyErr_Clear(); } } return m->sq_item(o, i); } } #else if (is_list || PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } /* ObjectGetItem */ #if CYTHON_USE_TYPE_SLOTS static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) { PyObject *runerr; Py_ssize_t key_value; PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence; if (unlikely(!(m && m->sq_item))) { PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name); return NULL; } key_value = __Pyx_PyIndex_AsSsize_t(index); if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); } if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { PyErr_Clear(); PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name); } return NULL; } static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) { PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping; if (likely(m && m->mp_subscript)) { return m->mp_subscript(obj, key); } return __Pyx_PyObject_GetIndex(obj, key); } #endif /* decode_c_string */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { Py_ssize_t length; if (unlikely((start < 0) | (stop < 0))) { size_t slen = strlen(cstring); if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) { PyErr_SetString(PyExc_OverflowError, "c-string too long to convert to Python"); return NULL; } length = (Py_ssize_t) slen; if (start < 0) { start += length; if (start < 0) start = 0; } if (stop < 0) stop += length; } if (unlikely(stop <= start)) return __Pyx_NewRef(__pyx_empty_unicode); length = stop - start; cstring += start; if (decode_func) { return decode_func(cstring, length, errors); } else { return PyUnicode_Decode(cstring, length, encoding, errors); } } /* PyErrExceptionMatches */ #if CYTHON_FAST_THREAD_STATE static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; } return 0; } static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { PyObject *exc_type = tstate->curexc_type; if (exc_type == err) return 1; if (unlikely(!exc_type)) return 0; if (unlikely(PyTuple_Check(err))) return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); } #endif /* GetAttr3 */ static PyObject *__Pyx_GetAttr3Default(PyObject *d) { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) return NULL; __Pyx_PyErr_Clear(); Py_INCREF(d); return d; } static CYTHON_INLINE PyObject 
*__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) { PyObject *r = __Pyx_GetAttr(o, n); return (likely(r)) ? r : __Pyx_GetAttr3Default(d); } /* PyDictVersioning */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { PyObject *dict = Py_TYPE(obj)->tp_dict; return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; } static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { PyObject **dictptr = NULL; Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; if (offset) { #if CYTHON_COMPILING_IN_CPYTHON dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); #else dictptr = _PyObject_GetDictPtr(obj); #endif } return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0; } static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { PyObject *dict = Py_TYPE(obj)->tp_dict; if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) return 0; return obj_dict_version == __Pyx_get_object_dict_version(obj); } #endif /* GetModuleGlobalName */ #if CYTHON_USE_DICT_VERSIONS static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) #else static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) #endif { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } else if (unlikely(PyErr_Occurred())) { return NULL; } #else result = PyDict_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } #endif #else result = PyObject_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } PyErr_Clear(); #endif return __Pyx_GetBuiltinName(name); } /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } /* RaiseNeedMoreValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? 
"" : "s"); } /* RaiseNoneIterError */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } /* ExtTypeTest */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(__Pyx_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } /* GetTopmostException */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate) { _PyErr_StackItem *exc_info = tstate->exc_info; while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) && exc_info->previous_item != NULL) { exc_info = exc_info->previous_item; } return exc_info; } #endif /* SaveResetException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); *type = exc_info->exc_type; *value = exc_info->exc_value; *tb = exc_info->exc_traceback; #else *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; #endif Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); } static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = type; exc_info->exc_value = value; exc_info->exc_traceback = tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } #endif /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) #endif { PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE #if CYTHON_USE_EXC_INFO_STACK { _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = local_type; exc_info->exc_value = local_value; exc_info->exc_traceback = local_tb; } #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; 
tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* SwapException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = *type; exc_info->exc_value = *value; exc_info->exc_traceback = *tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = *type; tstate->exc_value = *value; tstate->exc_traceback = *tb; #endif *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); PyErr_SetExcInfo(*type, *value, *tb); *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #endif /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_MAJOR_VERSION < 3 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) { module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_MAJOR_VERSION < 3 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* FastTypeChecks */ #if CYTHON_COMPILING_IN_CPYTHON static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { while (a) { a = a->tp_base; if (a == b) return 1; } return b == &PyBaseObject_Type; } static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { PyObject *mro; if (a == b) return 1; mro = a->tp_mro; if (likely(mro)) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) return 1; } return 0; } return __Pyx_InBases(a, b); } #if PY_MAJOR_VERSION == 2 static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { PyObject *exception, *value, *tb; int res; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&exception, &value, &tb); res = exc_type1 ? 
PyObject_IsSubclass(err, exc_type1) : 0; if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } if (!res) { res = PyObject_IsSubclass(err, exc_type2); if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } } __Pyx_ErrRestore(exception, value, tb); return res; } #else static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; if (!res) { res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); } return res; } #endif static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; assert(PyExceptionClass_Check(exc_type)); n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { PyObject *t = PyTuple_GET_ITEM(tuple, i); #if PY_MAJOR_VERSION < 3 if (likely(exc_type == t)) return 1; #endif if (likely(PyExceptionClass_Check(t))) { if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1; } else { } } return 0; } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { if (likely(err == exc_type)) return 1; if (likely(PyExceptionClass_Check(err))) { if (likely(PyExceptionClass_Check(exc_type))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); } else if (likely(PyTuple_Check(exc_type))) { return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type); } else { } } return PyErr_GivenExceptionMatches(err, exc_type); } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { assert(PyExceptionClass_Check(exc_type1)); assert(PyExceptionClass_Check(exc_type2)); if (likely(err == exc_type1 || err == exc_type2)) return 1; if (likely(PyExceptionClass_Check(err))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); } return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); } #endif /* PyIntBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) { (void)inplace; (void)zerodivision_check; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = intval; long x; long a = PyInt_AS_LONG(op1); x = (long)((unsigned long)a + b); if (likely((x^a) >= 0 || (x^b) >= 0)) return PyInt_FromLong(x); return PyLong_Type.tp_as_number->nb_add(op1, op2); } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { const long b = intval; long a, x; #ifdef HAVE_LONG_LONG const PY_LONG_LONG llb = intval; PY_LONG_LONG lla, llx; #endif const digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); if (likely(__Pyx_sst_abs(size) <= 1)) { a = likely(size) ? 
digits[0] : 0; if (size == -1) a = -a; } else { switch (size) { case -2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; default: return PyLong_Type.tp_as_number->nb_add(op1, op2); } } x = a + b; return PyLong_FromLong(x); #ifdef HAVE_LONG_LONG long_long: llx = lla + llb; return PyLong_FromLongLong(llx); #endif } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); double result; PyFPE_START_PROTECT("add", return NULL) result = ((double)a) + (double)b; PyFPE_END_PROTECT(result) return PyFloat_FromDouble(result); } return (inplace ? 
PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); } #endif /* None */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); } /* ImportFrom */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Format(PyExc_ImportError, #if PY_MAJOR_VERSION < 3 "cannot import name %.230s", PyString_AS_STRING(name)); #else "cannot import name %S", name); #endif } return value; } /* HasAttr */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) { PyObject *r; if (unlikely(!__Pyx_PyBaseString_Check(n))) { PyErr_SetString(PyExc_TypeError, "hasattr(): attribute name must be string"); return -1; } r = __Pyx_GetAttr(o, n); if (unlikely(!r)) { PyErr_Clear(); return 0; } else { Py_DECREF(r); return 1; } } /* PyObject_GenericGetAttrNoDict */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { PyErr_Format(PyExc_AttributeError, #if PY_MAJOR_VERSION >= 3 "'%.50s' object has no attribute '%U'", tp->tp_name, attr_name); #else "'%.50s' object has no attribute '%.400s'", tp->tp_name, PyString_AS_STRING(attr_name)); #endif return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { PyObject *descr; PyTypeObject *tp = Py_TYPE(obj); if (unlikely(!PyString_Check(attr_name))) { return PyObject_GenericGetAttr(obj, attr_name); } assert(!tp->tp_dictoffset); descr = _PyType_Lookup(tp, attr_name); if (unlikely(!descr)) { return __Pyx_RaiseGenericGetAttributeError(tp, attr_name); } Py_INCREF(descr); #if PY_MAJOR_VERSION < 3 if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) #endif { descrgetfunc f = Py_TYPE(descr)->tp_descr_get; if (unlikely(f)) { PyObject *res = f(descr, obj, (PyObject *)tp); Py_DECREF(descr); return res; } } return descr; } #endif /* PyObject_GenericGetAttr */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) { if (unlikely(Py_TYPE(obj)->tp_dictoffset)) { return PyObject_GenericGetAttr(obj, attr_name); } return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name); } #endif /* SetVTable */ static int __Pyx_SetVtable(PyObject *dict, void *vtable) { #if PY_VERSION_HEX >= 0x02070000 PyObject *ob = PyCapsule_New(vtable, 0, 0); #else PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); #endif if (!ob) goto bad; if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) goto bad; Py_DECREF(ob); return 0; bad: Py_XDECREF(ob); return -1; } /* PyObjectGetAttrStrNoError */ static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) __Pyx_PyErr_Clear(); } static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) { PyObject *result; #if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1 PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) { return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1); } #endif result = __Pyx_PyObject_GetAttrStr(obj, attr_name); if (unlikely(!result)) { 
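        /* Lookup failed: clear the error only if it is an AttributeError,
           so unrelated exceptions raised by attribute hooks still propagate. */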
__Pyx_PyObject_GetAttrStr_ClearAttributeError(); } return result; } /* SetupReduce */ static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { int ret; PyObject *name_attr; name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2); if (likely(name_attr)) { ret = PyObject_RichCompareBool(name_attr, name, Py_EQ); } else { ret = -1; } if (unlikely(ret < 0)) { PyErr_Clear(); ret = 0; } Py_XDECREF(name_attr); return ret; } static int __Pyx_setup_reduce(PyObject* type_obj) { int ret = 0; PyObject *object_reduce = NULL; PyObject *object_reduce_ex = NULL; PyObject *reduce = NULL; PyObject *reduce_ex = NULL; PyObject *reduce_cython = NULL; PyObject *setstate = NULL; PyObject *setstate_cython = NULL; #if CYTHON_USE_PYTYPE_LOOKUP if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; #else if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; #endif #if CYTHON_USE_PYTYPE_LOOKUP object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; #else object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; #endif reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD; if (reduce_ex == object_reduce_ex) { #if CYTHON_USE_PYTYPE_LOOKUP object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; #else object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; #endif reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD; if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython); if (likely(reduce_cython)) { ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; } else if (reduce == object_reduce || PyErr_Occurred()) { goto __PYX_BAD; } setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate); if (!setstate) PyErr_Clear(); if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) { setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython); if (likely(setstate_cython)) { ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; } else if (!setstate || PyErr_Occurred()) { goto __PYX_BAD; } } PyType_Modified((PyTypeObject*)type_obj); } } goto __PYX_GOOD; __PYX_BAD: if (!PyErr_Occurred()) PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name); ret = -1; __PYX_GOOD: #if !CYTHON_USE_PYTYPE_LOOKUP Py_XDECREF(object_reduce); Py_XDECREF(object_reduce_ex); #endif Py_XDECREF(reduce); Py_XDECREF(reduce_ex); Py_XDECREF(reduce_cython); Py_XDECREF(setstate); Py_XDECREF(setstate_cython); return ret; } /* CLineInTraceback */ #ifndef CYTHON_CLINE_IN_TRACEBACK static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) { PyObject *use_cline; PyObject *ptype, *pvalue, 
*ptraceback; #if CYTHON_COMPILING_IN_CPYTHON PyObject **cython_runtime_dict; #endif if (unlikely(!__pyx_cython_runtime)) { return c_line; } __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); #if CYTHON_COMPILING_IN_CPYTHON cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); if (likely(cython_runtime_dict)) { __PYX_PY_DICT_LOOKUP_IF_MODIFIED( use_cline, *cython_runtime_dict, __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) } else #endif { PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); if (use_cline_obj) { use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True; Py_DECREF(use_cline_obj); } else { PyErr_Clear(); use_cline = NULL; } } if (!use_cline) { c_line = 0; PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); } else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { c_line = 0; } __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); return c_line; } #endif /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" 
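/* These CPython headers expose the code/frame/traceback internals used
   below to synthesize a Python-level traceback entry for C code. */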
#include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; PyThreadState *tstate = __Pyx_PyThreadState_Current; if (c_line) { c_line = __Pyx_CLineForTraceback(tstate, c_line); } py_code = __pyx_find_code_object(c_line ? -c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? 
-c_line : py_line, py_code); } py_frame = PyFrame_New( tstate, /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; __Pyx_PyFrame_SetLineNumber(py_frame, py_line); PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags); PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } if ((0)) {} view->obj = NULL; Py_DECREF(obj); } #endif /* MemviewSliceIsContig */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim) { int i, index, step, start; Py_ssize_t itemsize = mvs.memview->view.itemsize; if (order == 'F') { step = 1; start = 0; } else { step = -1; start = ndim - 1; } for (i = 0; i < ndim; i++) { index = start + step * i; if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize) return 0; itemsize *= mvs.shape[index]; } return 1; } /* OverlappingSlices */ static void __pyx_get_array_memory_extents(__Pyx_memviewslice *slice, void **out_start, void **out_end, int ndim, size_t itemsize) { char *start, *end; int i; start = end = slice->data; for (i = 0; i < ndim; i++) { Py_ssize_t stride = slice->strides[i]; Py_ssize_t extent = slice->shape[i]; if (extent == 0) { *out_start = *out_end = start; return; } else { if (stride > 0) end += stride * (extent - 1); else start += stride * (extent - 1); } } *out_start = start; *out_end = end + itemsize; } static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize) { void *start1, *end1, *start2, *end2; __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize); __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize); return (start1 < end2) && (start2 < end1); } /* Capsule */ static CYTHON_INLINE PyObject * __pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig) { PyObject *cobj; #if PY_VERSION_HEX >= 0x02070000 cobj = PyCapsule_New(p, sig, NULL); #else cobj = PyCObject_FromVoidPtr(p, NULL); #endif return cobj; } /* IsLittleEndian */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) { union { uint32_t u32; uint8_t u8[4]; } S; S.u32 = 0x01020304; return S.u8[0] == 4; } /* BufferFormatCheck */ static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if 
(*t < '0' || *t > '9') {
        return -1;
    } else {
        count = *t++ - '0';
        while (*t >= '0' && *t <= '9') {
            count *= 10;
            count += *t++ - '0';
        }
    }
    *ts = t;
    return count;
}
static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
    int number = __Pyx_BufFmt_ParseNumber(ts);
    if (number == -1)
        PyErr_Format(PyExc_ValueError,
                     "Does not understand character buffer dtype format string ('%c')", **ts);
    return number;
}
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
    PyErr_Format(PyExc_ValueError,
                 "Unexpected format string character: '%c'", ch);
}
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
    switch (ch) {
    case '?': return "'bool'";
    case 'c': return "'char'";
    case 'b': return "'signed char'";
    case 'B': return "'unsigned char'";
    case 'h': return "'short'";
    case 'H': return "'unsigned short'";
    case 'i': return "'int'";
    case 'I': return "'unsigned int'";
    case 'l': return "'long'";
    case 'L': return "'unsigned long'";
    case 'q': return "'long long'";
    case 'Q': return "'unsigned long long'";
    case 'f': return (is_complex ? "'complex float'" : "'float'");
    case 'd': return (is_complex ? "'complex double'" : "'double'");
    case 'g': return (is_complex ? "'complex long double'" : "'long double'");
    case 'T': return "a struct";
    case 'O': return "Python object";
    case 'P': return "a pointer";
    case 's': case 'p': return "a string";
    case 0: return "end";
    default: return "unparseable format string";
    }
}
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
    switch (ch) {
    case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
    case 'h': case 'H': return 2;
    case 'i': case 'I': case 'l': case 'L': return 4;
    case 'q': case 'Q': return 8;
    case 'f': return (is_complex ? 8 : 4);
    case 'd': return (is_complex ? 16 : 8);
    case 'g': {
        PyErr_SetString(PyExc_ValueError,
                        "Python does not define a standard format string size for long double ('g').");
        return 0;
    }
    case 'O': case 'P': return sizeof(void*);
    default:
        __Pyx_BufFmt_RaiseUnexpectedChar(ch);
        return 0;
    }
}
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
    switch (ch) {
    case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
    case 'h': case 'H': return sizeof(short);
    case 'i': case 'I': return sizeof(int);
    case 'l': case 'L': return sizeof(long);
#ifdef HAVE_LONG_LONG
    case 'q': case 'Q': return sizeof(PY_LONG_LONG);
#endif
    case 'f': return sizeof(float) * (is_complex ? 2 : 1);
    case 'd': return sizeof(double) * (is_complex ? 2 : 1);
    case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
    case 'O': case 'P': return sizeof(void*);
    default: {
        __Pyx_BufFmt_RaiseUnexpectedChar(ch);
        return 0;
    }
    }
}
/* Structs used to measure the native alignment of each scalar type:
   the padding inserted after the leading char equals the alignment. */
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
    switch (ch) {
    case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
    case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
    case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
    case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
    case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
    case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
    case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
    case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
    case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
    default:
        __Pyx_BufFmt_RaiseUnexpectedChar(ch);
        return 0;
    }
}
/* These are for computing the padding at the end of the struct to align
   on the first member of the struct. This will probably be the same as
   above, but we don't have any guarantees. */
typedef struct { short x; char c; } __Pyx_pad_short;
typedef struct { int x; char c; } __Pyx_pad_int;
typedef struct { long x; char c; } __Pyx_pad_long;
typedef struct { float x; char c; } __Pyx_pad_float;
typedef struct { double x; char c; } __Pyx_pad_double;
typedef struct { long double x; char c; } __Pyx_pad_longdouble;
typedef struct { void *x; char c; } __Pyx_pad_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
    switch (ch) {
    case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
    case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
    case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
    case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
    case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
#endif
    case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
    case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
    case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
    case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
    default:
        __Pyx_BufFmt_RaiseUnexpectedChar(ch);
        return 0;
    }
}
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
    switch (ch) {
    case 'c': return 'H';
    case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p':
        return 'I';
    case '?': case 'B': case 'H': case 'I': case 'L': case 'Q':
        return 'U';
    case 'f': case 'd': case 'g':
        return (is_complex ?
'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = 
ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static PyObject * __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number, ndim; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ndim = ctx->head->field->type->ndim; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; default: break; } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case '\r': case '\n': ++ts; break; case '<': if (!__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; 
ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } CYTHON_FALLTHROUGH; case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if ((ctx->enc_type == *ts) && (got_Z == ctx->is_complex) && (ctx->enc_packmode == ctx->new_packmode) && (!ctx->is_valid_array)) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } CYTHON_FALLTHROUGH; case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } /* TypeInfoCompare */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) { int i; if (!a || !b) return 0; if (a == b) return 1; if (a->size != b->size || a->typegroup != b->typegroup || a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { if (a->typegroup == 'H' || b->typegroup == 'H') { return a->size == b->size; } else { return 0; } } if (a->ndim) { for (i = 0; i < a->ndim; i++) if (a->arraysize[i] != b->arraysize[i]) return 0; } if (a->typegroup == 'S') { if (a->flags != b->flags) return 0; if (a->fields || b->fields) { if (!(a->fields && b->fields)) return 0; for (i = 0; a->fields[i].type && b->fields[i].type; i++) { __Pyx_StructField *field_a = a->fields + i; __Pyx_StructField *field_b = b->fields + i; if (field_a->offset != field_b->offset || !__pyx_typeinfo_cmp(field_a->type, field_b->type)) return 0; } return !a->fields[i].type && !b->fields[i].type; } } return 1; } /* MemviewSliceValidateAndInit */ static int __pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) { if (buf->shape[dim] <= 1) return 1; if (buf->strides) { if (spec & __Pyx_MEMVIEW_CONTIG) { if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { if (unlikely(buf->strides[dim] != sizeof(void *))) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly contiguous " "in dimension %d.", dim); goto fail; } } else if (unlikely(buf->strides[dim] != buf->itemsize)) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } if (spec & __Pyx_MEMVIEW_FOLLOW) { Py_ssize_t stride = buf->strides[dim]; if (stride < 0) stride = -stride; if (unlikely(stride < buf->itemsize)) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } } else { if (unlikely(spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1)) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not contiguous in " "dimension %d", dim); goto fail; } else if (unlikely(spec & (__Pyx_MEMVIEW_PTR))) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not indirect in " "dimension %d", dim); goto fail; } else if (unlikely(buf->suboffsets)) { PyErr_SetString(PyExc_ValueError, "Buffer exposes suboffsets but no strides"); goto fail; } } return 1; fail: return 0; } static int __pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec) { if (spec & __Pyx_MEMVIEW_DIRECT) { if (unlikely(buf->suboffsets && 
buf->suboffsets[dim] >= 0)) { PyErr_Format(PyExc_ValueError, "Buffer not compatible with direct access " "in dimension %d.", dim); goto fail; } } if (spec & __Pyx_MEMVIEW_PTR) { if (unlikely(!buf->suboffsets || (buf->suboffsets[dim] < 0))) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly accessible " "in dimension %d.", dim); goto fail; } } return 1; fail: return 0; } static int __pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) { int i; if (c_or_f_flag & __Pyx_IS_F_CONTIG) { Py_ssize_t stride = 1; for (i = 0; i < ndim; i++) { if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { PyErr_SetString(PyExc_ValueError, "Buffer not fortran contiguous."); goto fail; } stride = stride * buf->shape[i]; } } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { Py_ssize_t stride = 1; for (i = ndim - 1; i >- 1; i--) { if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { PyErr_SetString(PyExc_ValueError, "Buffer not C contiguous."); goto fail; } stride = stride * buf->shape[i]; } } return 1; fail: return 0; } static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj) { struct __pyx_memoryview_obj *memview, *new_memview; __Pyx_RefNannyDeclarations Py_buffer *buf; int i, spec = 0, retval = -1; __Pyx_BufFmt_Context ctx; int from_memoryview = __pyx_memoryview_check(original_obj); __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) original_obj)->typeinfo)) { memview = (struct __pyx_memoryview_obj *) original_obj; new_memview = NULL; } else { memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( original_obj, buf_flags, 0, dtype); new_memview = memview; if (unlikely(!memview)) goto fail; } buf = &memview->view; if (unlikely(buf->ndim != ndim)) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", ndim, buf->ndim); goto fail; } if (new_memview) { __Pyx_BufFmt_Init(&ctx, stack, dtype); if (unlikely(!__Pyx_BufFmt_CheckString(&ctx, buf->format))) goto fail; } if (unlikely((unsigned) buf->itemsize != dtype->size)) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, dtype->size, (dtype->size > 1) ? 
"s" : ""); goto fail; } if (buf->len > 0) { for (i = 0; i < ndim; i++) { spec = axes_specs[i]; if (unlikely(!__pyx_check_strides(buf, i, ndim, spec))) goto fail; if (unlikely(!__pyx_check_suboffsets(buf, i, ndim, spec))) goto fail; } if (unlikely(buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag))) goto fail; } if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, new_memview != NULL) == -1)) { goto fail; } retval = 0; goto no_fail; fail: Py_XDECREF(new_memview); retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 2, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_double(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_int(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0, PyBUF_RECORDS_RO | writable_flag, 1, &__Pyx_TypeInfo_int, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_int(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 2, &__Pyx_TypeInfo_int, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* 
ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1, &__Pyx_TypeInfo_int, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* MemviewSliceCopyTemplate */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object) { __Pyx_RefNannyDeclarations int i; __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } }; struct __pyx_memoryview_obj *from_memview = from_mvs->memview; Py_buffer *buf = &from_memview->view; PyObject *shape_tuple = NULL; PyObject *temp_int = NULL; struct __pyx_array_obj *array_obj = NULL; struct __pyx_memoryview_obj *memview_obj = NULL; __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0); for (i = 0; i < ndim; i++) { if (unlikely(from_mvs->suboffsets[i] >= 0)) { PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with " "indirect dimensions (axis %d)", i); goto fail; } } shape_tuple = PyTuple_New(ndim); if (unlikely(!shape_tuple)) { goto fail; } __Pyx_GOTREF(shape_tuple); for(i = 0; i < ndim; i++) { temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); if(unlikely(!temp_int)) { goto fail; } else { PyTuple_SET_ITEM(shape_tuple, i, temp_int); temp_int = NULL; } } array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL); if (unlikely(!array_obj)) { goto fail; } __Pyx_GOTREF(array_obj); memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( (PyObject *) array_obj, contig_flag, dtype_is_object, from_mvs->memview->typeinfo); if (unlikely(!memview_obj)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) goto fail; if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim, dtype_is_object) < 0)) goto fail; goto no_fail; fail: __Pyx_XDECREF(new_mvs.memview); new_mvs.memview = NULL; new_mvs.data = NULL; no_fail: __Pyx_XDECREF(shape_tuple); __Pyx_XDECREF(temp_int); __Pyx_XDECREF(array_obj); __Pyx_RefNannyFinishContext(); return new_mvs; } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" 
#endif const int neg_one = (int) -1, const_zero = (int) 0; #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic pop #endif const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | 
(int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #endif const int neg_one = (int) -1, const_zero = (int) 0; #ifdef 
__Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic pop #endif const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #endif const long neg_one = (long) -1, const_zero = (long) 0; #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic pop #endif const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { 
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); 
#else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #endif const long neg_one = (long) -1, const_zero = (long) 0; #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic pop #endif const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #endif const char neg_one = (char) -1, const_zero = (char) 0; #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic pop #endif const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(char) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (char) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0]) case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) { return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) { return (char) 
(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) { return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (char) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(char) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0]) case -2: if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -3: if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -4: if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) 
(((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; } #endif if (sizeof(char) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else char val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (char) -1; } } else { char val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (char) -1; val = __Pyx_PyInt_As_char(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to char"); return (char) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to char"); return (char) -1; } /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; if (PyObject_Hash(*t->p) == -1) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } #if 
__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT #if !CYTHON_PEP393_ENABLED static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; } #else static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (likely(PyUnicode_IS_ASCII(o))) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif } #endif #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { return __Pyx_PyUnicode_AsStringAndSize(o, length); } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { int retval; if (unlikely(!x)) return -1; retval = __Pyx_PyObject_IsTrue(x); Py_DECREF(x); return retval; } static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { #if PY_MAJOR_VERSION >= 3 if (PyLong_Check(result)) { if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, "__int__ returned non-int (type %.200s). 
" "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of Python.", Py_TYPE(result)->tp_name)) { Py_DECREF(result); return NULL; } return result; } #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", type_name, type_name, Py_TYPE(result)->tp_name); Py_DECREF(result); return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x) || PyLong_Check(x))) #else if (likely(PyLong_Check(x))) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = m->nb_int(x); } else if (m && m->nb_long) { name = "long"; res = m->nb_long(x); } #else if (likely(m && m->nb_int)) { name = "int"; res = m->nb_int(x); } #endif #else if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { res = PyNumber_Int(x); } #endif if (likely(res)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { #else if (unlikely(!PyLong_CheckExact(res))) { #endif return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(b); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */
false_sharing.c
#include "omp.h" #include "stdio.h" #include "stdlib.h" #include "malloc.h" int main(int argc, char* argv[]) { if(argc < 2) { printf("usage: %s [num_threads] <stride>\n",argv[0]); return 1; } int num_threads = atoi(argv[1]); int stride = 1; if(argc >= 3) stride = atoi(argv[2]); int* array = memalign(1024,num_threads*stride*sizeof(int)); for(int i = 0; i < num_threads; i+=stride) array[i]=0; double start_time = omp_get_wtime(); #pragma omp parallel num_threads(num_threads) { int index = omp_get_thread_num()*stride; for(int i = 0; i < 1e9; i++) array[index]++; } double end_time = omp_get_wtime(); printf("Took: %f\n",end_time-start_time); return 0; }
transducer.c
#if !defined(APPLE)
#include <omp.h>
#endif

#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#include <TH/TH.h>

#include "transducer.h"

static inline float log_sum_exp(float a, float b) {
    if (!isfinite(a)) return b;
    if (!isfinite(b)) return a;
    if (a > b)
        return log1p(exp(b - a)) + a;
    else
        return log1p(exp(a - b)) + b;
}

// Index into a [T x U x V] row-major tensor; callers pass the padded
// label-axis stride (max_u) as U.
static inline int idx3(int t, int u, int v, int U, int V) {
    return t * (U * V) + u * V + v;
}

static inline int idx2(int t, int u, int U) {
    return t * U + u;
}

int cumsum(int *lens, int num) {
    int sum = 0;
    for (int i = 0; i < num; i++)
        sum += lens[i];
    return sum;
}

// Computes the transducer loss for one utterance and fills in gradients
// w.r.t. the log probabilities.  Expects `grads` zero-initialized by the
// caller; `s` is the padded label-axis stride (max_u) of log_probs/grads.
float cost_and_grad_single(float* log_probs, float* grads,
                           int* labels, int blank,
                           int T, int U, int V, int s) {
    // Forward pass
    float *alphas = (float *) malloc(T * U * sizeof(float));
    alphas[0] = 0;
    for (int t = 1; t < T; t++) {
        alphas[idx2(t, 0, U)] = alphas[idx2(t-1, 0, U)]
                              + log_probs[idx3(t-1, 0, blank, s, V)];
    }
    for (int u = 1; u < U; u++) {
        alphas[idx2(0, u, U)] = alphas[idx2(0, u-1, U)]
                              + log_probs[idx3(0, u-1, labels[u-1], s, V)];
    }
    for (int t = 1; t < T; t++) {
        for (int u = 1; u < U; u++) {
            float no_emit = alphas[idx2(t-1, u, U)]
                          + log_probs[idx3(t-1, u, blank, s, V)];
            float emit = alphas[idx2(t, u-1, U)]
                       + log_probs[idx3(t, u-1, labels[u-1], s, V)];
            alphas[idx2(t, u, U)] = log_sum_exp(emit, no_emit);
        }
    }
    float forward_ll = alphas[idx2(T-1, U-1, U)]
                     + log_probs[idx3(T-1, U-1, blank, s, V)];

    // Backward pass
    float *betas = (float *) malloc(T * U * sizeof(float));
    betas[idx2(T-1, U-1, U)] = log_probs[idx3(T-1, U-1, blank, s, V)];
    for (int t = T-2; t >= 0; t--) {
        betas[idx2(t, U-1, U)] = betas[idx2(t+1, U-1, U)]
                               + log_probs[idx3(t, U-1, blank, s, V)];
    }
    for (int u = U-2; u >= 0; u--) {
        betas[idx2(T-1, u, U)] = betas[idx2(T-1, u+1, U)]
                               + log_probs[idx3(T-1, u, labels[u], s, V)];
    }
    for (int t = T-2; t >= 0; t--) {
        for (int u = U-2; u >= 0; u--) {
            float no_emit = betas[idx2(t+1, u, U)]
                          + log_probs[idx3(t, u, blank, s, V)];
            float emit = betas[idx2(t, u+1, U)]
                       + log_probs[idx3(t, u, labels[u], s, V)];
            betas[idx2(t, u, U)] = log_sum_exp(emit, no_emit);
        }
    }
    float backward_ll = betas[0];

    // The forward and backward likelihoods should agree up to rounding.
    float diff = fabs(backward_ll - forward_ll);
    float diff_tol = fmax(1e-6 * fabs(forward_ll), 1e-8);
    if (diff > diff_tol) {
        printf("WARNING: Forward backward likelihood mismatch %f\n", diff);
    }

    // Gradients w.r.t. log probabilities
    grads[idx3(T-1, U-1, blank, s, V)] = alphas[idx2(T-1, U-1, U)];
    for (int t = 0; t < T-1; t++) {
        for (int u = 0; u < U; u++) {
            grads[idx3(t, u, blank, s, V)] = alphas[idx2(t, u, U)]
                                           + betas[idx2(t+1, u, U)];
        }
    }
    for (int t = 0; t < T; t++) {
        for (int u = 0; u < U - 1; u++) {
            int l = labels[u];
            grads[idx3(t, u, l, s, V)] = alphas[idx2(t, u, U)]
                                       + betas[idx2(t, u+1, U)];
        }
    }
    for (int t = 0; t < T; t++) {
        for (int u = 0; u < U; u++) {
            for (int v = 0; v < V; v++) {
                float g = grads[idx3(t, u, v, s, V)];
                if (g != 0) {
                    grads[idx3(t, u, v, s, V)] =
                        -exp(-forward_ll + g + log_probs[idx3(t, u, v, s, V)]);
                }
            }
        }
    }

    // Cleanup
    free(alphas);
    free(betas);

    return -forward_ll;
}

void cost_and_grad(float* log_probs, float* grads,
                   float* costs, int* flat_labels,
                   int* label_lengths, int* input_lengths,
                   int batch_size, int max_t, int max_u,
                   int alphabet_size, int blank) {
    #pragma omp parallel for
    for (int mb = 0; mb < batch_size; ++mb) {
        int T = input_lengths[mb];      // Length of utterance (time)
        int U = label_lengths[mb] + 1;  // Length of transcription
        int mb_offset = mb * max_t * max_u * alphabet_size;
        int label_offset = cumsum(label_lengths, mb);
        costs[mb] = cost_and_grad_single(log_probs + mb_offset,
                                         grads + mb_offset,
                                         flat_labels + label_offset,
                                         blank, T, U, alphabet_size, max_u);
    }
}

void transduce(THFloatTensor *th_log_probs,
               THIntTensor *th_labels,
               THIntTensor *th_input_lengths,
               THIntTensor *th_label_lengths,
               THFloatTensor *th_costs,
               THFloatTensor *th_grads,
               int blank) {
    int batch_size = THFloatTensor_size(th_log_probs, 0);
    int max_t = THFloatTensor_size(th_log_probs, 1);
    int max_u = THFloatTensor_size(th_log_probs, 2);
    int alphabet_size = THFloatTensor_size(th_log_probs, 3);

    float *log_probs = THFloatTensor_data(th_log_probs);
    int *input_lengths = THIntTensor_data(th_input_lengths);
    int *labels = THIntTensor_data(th_labels);
    int *label_lengths = THIntTensor_data(th_label_lengths);
    float *costs = THFloatTensor_data(th_costs);
    float *grads = THFloatTensor_data(th_grads);

    cost_and_grad(log_probs, grads, costs,
                  labels, label_lengths, input_lengths,
                  batch_size, max_t, max_u, alphabet_size, blank);
}
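A tiny smoke test makes the kernel's conventions concrete. The hypothetical driver below (not part of the binding) calls `cost_and_grad` directly on one utterance with T=2 frames, one label (U=2), V=3 symbols, and uniform log-probabilities; there are exactly two alignment paths of probability (1/3)^3 each, so the cost should come out to log(27/2) ≈ 2.60. It assumes compilation alongside the kernels above (the TH-dependent `transduce` wrapper can be stubbed out):

#include <math.h>
#include <stdio.h>
#include <string.h>

// Defined above in transducer.c.
void cost_and_grad(float* log_probs, float* grads, float* costs,
                   int* flat_labels, int* label_lengths, int* input_lengths,
                   int batch_size, int max_t, int max_u,
                   int alphabet_size, int blank);

int main(void) {
    enum { T = 2, U = 2, V = 3, BLANK = 0 };
    float log_probs[T * U * V], grads[T * U * V], cost;
    int labels[] = { 1 };           // the single non-blank symbol
    int label_lengths[] = { 1 };
    int input_lengths[] = { T };

    for (int i = 0; i < T * U * V; i++)
        log_probs[i] = logf(1.0f / V);   // uniform distribution
    memset(grads, 0, sizeof grads);      // kernel expects zeroed grads

    cost_and_grad(log_probs, grads, &cost, labels,
                  label_lengths, input_lengths,
                  /*batch_size=*/1, /*max_t=*/T, /*max_u=*/U,
                  /*alphabet_size=*/V, BLANK);

    printf("cost = %f (expected %f)\n", cost, log(27.0 / 2.0));
    return 0;
}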
GB_binop__ge_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__ge_uint32 // A.*B function (eWiseMult): GB_AemultB__ge_uint32 // A*D function (colscale): GB_AxD__ge_uint32 // D*A function (rowscale): GB_DxB__ge_uint32 // C+=B function (dense accum): GB_Cdense_accumB__ge_uint32 // C+=b function (dense accum): GB_Cdense_accumb__ge_uint32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__ge_uint32 // C=scalar+B GB_bind1st__ge_uint32 // C=scalar+B' GB_bind1st_tran__ge_uint32 // C=A+scalar GB_bind2nd__ge_uint32 // C=A'+scalar GB_bind2nd_tran__ge_uint32 // C type: bool // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x >= y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GE || GxB_NO_UINT32 || GxB_NO_GE_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__ge_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__ge_uint32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__ge_uint32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__ge_uint32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__ge_uint32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__ge_uint32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__ge_uint32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__ge_uint32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    uint32_t x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint32_t bij = Bx [p] ;
        Cx [p] = (x >= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__ge_uint32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint32_t aij = Ax [p] ;
        Cx [p] = (aij >= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)

#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint32_t aij = Ax [pA] ;        \
    Cx [pC] = (x >= aij) ;          \
}

GrB_Info GB_bind1st_tran__ge_uint32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)

#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint32_t aij = Ax [pA] ;        \
    Cx [pC] = (aij >= y) ;          \
}

GrB_Info GB_bind2nd_tran__ge_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
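// Standalone sketch (illustrative, not part of the generated file) of the
// bind1st pattern used by GB_bind1st__ge_uint32 above: the scalar x is bound
// as the first operand and cij = (x >= bij) is applied elementwise under
// OpenMP. The function and variable names here are invented.
#include <cstdint>
#include <cstdio>

static void bind1st_ge_uint32_sketch(bool *Cx, uint32_t x,
                                     const uint32_t *Bx, int64_t anz,
                                     int nthreads) {
    int64_t p;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0; p < anz; p++) {
        Cx[p] = (x >= Bx[p]);   // same GB_BINOP as the generated kernel
    }
}

int main() {
    uint32_t B[4] = {1, 5, 7, 9};
    bool C[4];
    bind1st_ge_uint32_sketch(C, 5, B, 4, 2);
    for (int i = 0; i < 4; i++) std::printf("%d ", (int) C[i]);  // prints: 1 1 0 0
    std::printf("\n");
    return 0;
}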
GB_binop__rminus_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__rminus_int16 // A.*B function (eWiseMult): GB_AemultB__rminus_int16 // A*D function (colscale): GB_AxD__rminus_int16 // D*A function (rowscale): GB_DxB__rminus_int16 // C+=B function (dense accum): GB_Cdense_accumB__rminus_int16 // C+=b function (dense accum): GB_Cdense_accumb__rminus_int16 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__rminus_int16 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__rminus_int16 // C=scalar+B GB_bind1st__rminus_int16 // C=scalar+B' GB_bind1st_tran__rminus_int16 // C=A+scalar GB_bind2nd__rminus_int16 // C=A'+scalar GB_bind2nd_tran__rminus_int16 // C type: int16_t // A type: int16_t // B,b type: int16_t // BinaryOp: cij = (bij - aij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (y - x) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RMINUS || GxB_NO_INT16 || GxB_NO_RMINUS_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB_Cdense_ewise3_accum__rminus_int16 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__rminus_int16 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__rminus_int16 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__rminus_int16 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__rminus_int16 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *GB_RESTRICT Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__rminus_int16 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *GB_RESTRICT Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__rminus_int16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__rminus_int16
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__rminus_int16
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int16_t bij = Bx [p] ;
        Cx [p] = (bij - x) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__rminus_int16
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int16_t aij = Ax [p] ;
        Cx [p] = (y - aij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)

#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    int16_t aij = Ax [pA] ;         \
    Cx [pC] = (aij - x) ;           \
}

GrB_Info GB_bind1st_tran__rminus_int16
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)

#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    int16_t aij = Ax [pA] ;         \
    Cx [pC] = (y - aij) ;           \
}

GrB_Info GB_bind2nd_tran__rminus_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
elemwise_binary_scalar_op.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2016 by Contributors * \file elemwise_binary_scalar_op.h * \brief Function definition of elementwise binary scalar operators */ #ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_ #define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_ #include <mxnet/operator_util.h> #include <vector> #include <utility> #include "../mshadow_op.h" #include "../elemwise_op_common.h" #include "elemwise_unary_op.h" namespace mxnet { namespace op { class BinaryScalarOp : public UnaryOp { /*! \brief Tensor operation against a scalar with a dense result */ template<typename OP, typename DType, typename IType> static void ComputeExDenseResultRsp(mshadow::Stream<cpu> *stream, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &input, const OpReqType req, const NDArray &output) { const double alpha = nnvm::get<double>(attrs.parsed); CHECK_EQ(output.shape(), input.shape()); const int64_t row_count = output.shape()[0]; const int64_t items_per_row = output.shape().Size() / row_count; const DType result_for_zero = OP::Map(DType(0), DType(alpha)); mshadow::Tensor<cpu, 1, DType> input_data = input.data().FlatTo1D<cpu, DType>(stream); mshadow::Tensor<cpu, 1, DType> output_data = output.data().FlatTo1D<cpu, DType>(stream); const int64_t sparse_row_count = input.aux_shape(rowsparse::kIdx).Size(); if (sparse_row_count != row_count) { mshadow::Tensor<cpu, 1, IType> row_indexes = input.aux_data( rowsparse::kIdx).FlatTo1D<cpu, IType>(stream); int64_t input_iter = 0; int64_t output_row = 0; IType next_input_row = 0; while (output_row < row_count) { next_input_row = input_iter < sparse_row_count ? 
int64_t(row_indexes[input_iter]) : row_count; // Split up into blocks of contiguous data and do those together // Do contiguous dense blocks const int64_t dense_block_count = next_input_row - output_row; if (dense_block_count > 0) { MXNET_ASSIGN_REQ_SWITCH(req, Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, cpu>::Launch( stream, items_per_row * dense_block_count, output_data.dptr_ + items_per_row * output_row, result_for_zero); }); output_row += dense_block_count; continue; } // Do contiguous sparse blocks int64_t next_non_contiguous_sparse = input_iter; while (next_non_contiguous_sparse < sparse_row_count - 1) { if (row_indexes[next_non_contiguous_sparse + 1] != row_indexes[next_non_contiguous_sparse] + 1) { break; } ++next_non_contiguous_sparse; } const int64_t sparse_block_count = next_non_contiguous_sparse - input_iter + 1; if (sparse_block_count > 0) { MXNET_ASSIGN_REQ_SWITCH(req, Req, { mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch( stream, items_per_row * sparse_block_count, &output_data.dptr_[items_per_row * output_row], &input_data.dptr_[items_per_row * input_iter], DType(alpha)); }); output_row += sparse_block_count; input_iter += sparse_block_count; continue; } } } else { // All rows exist (eventually we don't have to do complex // things to call GPU kernels because we don't need to access row indices) MXNET_ASSIGN_REQ_SWITCH(req, Req, { mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch( stream, items_per_row * row_count, output_data.dptr_, input_data.dptr_, DType(alpha)); }); } } /*! \brief Tensor operation against a scalar with a dense result */ template<typename OP, typename DType, typename IType> static void ComputeExDenseResultRsp(mshadow::Stream<gpu> *stream, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &input, const OpReqType req, const NDArray &output) { LOG(FATAL) << "NOT IMPLEMENTED"; } /*! \brief Tensor operation against a scalar with a dense result */ template<typename OP, typename DType, typename IType, typename CType> static void ComputeExDenseResultCsr(mshadow::Stream<cpu> *stream, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &input, const OpReqType req, const NDArray &output) { CHECK_EQ(output.shape(), input.shape()); const double alpha = nnvm::get<double>(attrs.parsed); const DType dense_fill_val = OP::Map(DType(0), DType(alpha)); const TBlob column_indexes = input.aux_data(csr::kIdx); const size_t item_count = column_indexes.Size(); // Pre-fill dense with 0-input/output value FillDense<DType>(stream, output.shape().Size(), dense_fill_val, req, output.data().dptr<DType>()); mshadow::Tensor<cpu, 2, DType> out = AsRowise2D<DType>(stream, output.data()); if (item_count) { const DType *in = input.data().dptr<DType>(); const IType *column_indexes_ptr = column_indexes.dptr<IType>(); const auto row_count = static_cast<size_t>(input.shape()[0]); const TBlob row_starts = input.aux_data(csr::kIndPtr); const CType *row_starts_ptr = row_starts.dptr<CType>(); #pragma omp parallel for for (int i = 0; i < static_cast<int>(row_count); ++i) { const bool last_row = i == static_cast<int>(row_count) - 1; // Split up into blocks of contiguous data and do those together const size_t row_item_start_iter = row_starts_ptr[i]; const size_t input_items_this_row = !last_row ? 
static_cast<size_t>(row_starts_ptr[i + 1]) - row_item_start_iter : item_count - row_item_start_iter; if (input_items_this_row) { const IType *this_row_column_indexes = column_indexes_ptr + row_item_start_iter; const DType *row_data_start = in + row_item_start_iter; DType *output_this_row = out[i].dptr_; // More overhead to use OMP for small loops, so don't if (input_items_this_row > 1000) { #pragma omp parallel for for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) { const IType col = this_row_column_indexes[j]; const DType val = row_data_start[j]; output_this_row[col] = OP::Map(val, DType(alpha)); } } else { for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) { const IType col = this_row_column_indexes[j]; const DType val = row_data_start[j]; output_this_row[col] = OP::Map(val, DType(alpha)); } } } } } } /*! \brief Tensor operation against a scalar with a dense result */ template<typename OP, typename DType, typename IType, typename CType> static void ComputeExDenseResultCsr(mshadow::Stream<gpu> *stream, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &input, const OpReqType req, const NDArray &output) { LOG(FATAL) << "NOT IMPLEMENTED"; } template<typename xpu, typename OP, typename DType, typename IType> static void ComputeExDenseResult(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &input, const OpReqType req, const NDArray output) { mshadow::Stream<xpu> *stream = ctx.get_stream<xpu>(); CHECK_EQ(output.storage_type(), kDefaultStorage); switch (input.storage_type()) { case kRowSparseStorage: { ComputeExDenseResultRsp<OP, DType, IType>(stream, attrs, ctx, input, req, output); break; } case kCSRStorage: { MSHADOW_IDX_TYPE_SWITCH(input.aux_data(csr::kIndPtr).type_flag_, CType, { ComputeExDenseResultCsr<OP, DType, IType, CType>(stream, attrs, ctx, input, req, output); }); break; } default: CHECK(false) << "Unsupported sparse storage type"; break; } } public: template<typename xpu, typename OP> static void Compute(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { DCHECK_EQ(inputs.size(), 1); DCHECK_EQ(outputs.size(), 1); using namespace mshadow; using namespace mshadow::expr; Stream<xpu> *s = ctx.get_stream<xpu>(); const double alpha = nnvm::get<double>(attrs.parsed); MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch( s, inputs[0].Size(), outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), DType(alpha)); }); }); } template<typename xpu, typename OP> static void ComputeEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { DCHECK_EQ(inputs.size(), 1); DCHECK_EQ(outputs.size(), 1); const auto in_stype = inputs[0].storage_type(); const auto out_stype = outputs[0].storage_type(); if (req[0] == kNullOp) { return; } if ((in_stype == kRowSparseStorage && out_stype == kRowSparseStorage) || (in_stype == kCSRStorage && out_stype == kCSRStorage)) { // csr -> csr, or rsp -> rsp UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Compute<xpu, OP>); } else if (out_stype == kDefaultStorage && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) { MSHADOW_TYPE_SWITCH(outputs[0].data().type_flag_, DType, { MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, { ComputeExDenseResult<xpu, OP, 
DType, IType>(attrs, ctx, inputs[0], req[0], outputs[0]); }); }); } else { LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs); } } template<typename xpu, typename OP> static void Backward(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mshadow; using namespace mshadow::expr; Stream<xpu> *s = ctx.get_stream<xpu>(); const double alpha = nnvm::get<double>(attrs.parsed); MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { mxnet::op::mxnet_op::Kernel<mxnet::op::mxnet_op::op_with_req< mxnet::op::mxnet_op::backward_grad_tuned<OP>, Req>, xpu>:: Launch(s, inputs[0].Size(), outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), inputs[1].dptr<DType>(), DType(alpha)); }); }); } }; #define MXNET_OPERATOR_REGISTER_BINARY_SCALAR(name) \ NNVM_REGISTER_OP(name) \ .set_num_inputs(1) \ .set_num_outputs(1) \ .set_attr_parser([](NodeAttrs* attrs) { \ attrs->parsed = std::stod(attrs->dict["scalar"]); \ }) \ .set_attr<nnvm::FInferShape>("FInferShape", ElemwiseShape<1, 1>) \ .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>) \ .set_attr<nnvm::FInplaceOption>("FInplaceOption", \ [](const NodeAttrs& attrs){ \ return std::vector<std::pair<int, int> >{{0, 0}}; \ }) \ .add_argument("data", "NDArray-or-Symbol", "source input") \ .add_argument("scalar", "float", "scalar input") } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
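// Illustrative sketch (not part of this header) of how a .cc translation unit
// might use the MXNET_OPERATOR_REGISTER_BINARY_SCALAR macro above. The
// operator name "_plus_scalar", the mshadow_op::plus functor, and the
// FGradient choice follow common MXNet conventions but are assumptions here.
MXNET_OPERATOR_REGISTER_BINARY_SCALAR(_plus_scalar)
.set_attr<FCompute>("FCompute<cpu>",
                    BinaryScalarOp::Compute<cpu, mshadow_op::plus>)
.set_attr<FComputeEx>("FComputeEx<cpu>",
                      BinaryScalarOp::ComputeEx<cpu, mshadow_op::plus>)
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseNone{"_copy"});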
debug_task_shared.c
// This testcase checks emission of debug info for variables // inside shared clause of task construct. // REQUIRES: x86_64-linux // RUN: %clang_cc1 -debug-info-kind=constructor -DSHARED -x c -verify -triple x86_64-pc-linux-gnu -fopenmp -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK // RUN: %clang_cc1 -debug-info-kind=constructor -x c -verify -triple x86_64-pc-linux-gnu -fopenmp -emit-llvm %s -o - | FileCheck %s --check-prefix=NEG // RUN: %clang_cc1 -debug-info-kind=line-directives-only -DSHARED -x c -verify -triple x86_64-pc-linux-gnu -fopenmp -emit-llvm %s -o - | FileCheck %s --check-prefix=NEG // RUN: %clang_cc1 -debug-info-kind=line-tables-only -DSHARED -x c -verify -triple x86_64-pc-linux-gnu -fopenmp -emit-llvm %s -o - | FileCheck %s --check-prefix=NEG // RUN: %clang_cc1 -debug-info-kind=limited -DSHARED -x c -verify -triple x86_64-pc-linux-gnu -fopenmp -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK // expected-no-diagnostics // CHECK-LABEL: define internal i32 @.omp_task_entry. // CHECK-DAG: [[CONTEXT:%[0-9]+]] = load %struct.anon*, %struct.anon** %__context.addr.i, align 8 // CHECK-DAG: call void @llvm.dbg.declare(metadata %struct.anon* [[CONTEXT]], metadata [[SHARE2:![0-9]+]], metadata !DIExpression(DW_OP_deref)) // CHECK-DAG: call void @llvm.dbg.declare(metadata %struct.anon* [[CONTEXT]], metadata [[SHARE3:![0-9]+]], metadata !DIExpression(DW_OP_plus_uconst, 8, DW_OP_deref)) // CHECK-DAG: call void @llvm.dbg.declare(metadata %struct.anon* [[CONTEXT]], metadata [[SHARE1:![0-9]+]], metadata !DIExpression(DW_OP_plus_uconst, 16, DW_OP_deref)) // CHECK-DAG: [[SHARE2]] = !DILocalVariable(name: "share2" // CHECK-DAG: [[SHARE3]] = !DILocalVariable(name: "share3" // CHECK-DAG: [[SHARE1]] = !DILocalVariable(name: "share1" // NEG-LABEL: define internal i32 @.omp_task_entry. // NEG: [[CONTEXT:%[0-9]+]] = load %struct.anon*, %struct.anon** %__context.addr.i, align 8 // NEG-NOT: call void @llvm.dbg.declare(metadata %struct.anon* [[CONTEXT]], metadata {{![0-9]+}}, metadata !DIExpression(DW_OP_deref)) extern int printf(const char *, ...); int foo(int n) { int share1 = 9, share2 = 11, share3 = 13, priv1, priv2, fpriv; fpriv = n + 4; if (n < 2) return n; else { #if SHARED #pragma omp task shared(share1, share2) private(priv1, priv2) firstprivate(fpriv) shared(share3) #else #pragma omp task private(priv1, priv2) firstprivate(fpriv) #endif { priv1 = n; priv2 = n + 2; share2 += share3; printf("share1 = %d, share2 = %d, share3 = %d\n", share1, share2, share3); share1 = priv1 + priv2 + fpriv + foo(n - 1) + share2 + share3; } #pragma omp taskwait return share1 + share2 + share3; } } int main() { int n = 10; printf("foo(%d) = %d\n", n, foo(n)); return 0; }
infogain_openmp.c
// information gain C-Adaptation
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <errno.h>
#include <sys/time.h>
#include <time.h>
#include <omp.h>

#include "common.h"
#include "cpmidx_double.h"
#include "blaslapack.h"
#include "randnumber.h"
#include "outputtiming.h"
#include "calculatenorm.h"
#include "calculatemaxchange.h"
#include "getentropy.h"
#include "getinfogainforattribute.h"
#include "infogain_openmp.h"

/**
 * InfoGain calculates the InformationGain and GainRatio of the matrix "data" according to the classification given by classColumn
 *
 * Parameter
 * data                   double*     IN, matrix to calculate IG and GR for, instance times attribute matrix, attributes stored column wise
 * m                      int         IN, first dimension of matrix data (rows)
 * n                      int         IN, second dimension of matrix data (columns)
 * classColumn            double*     IN, array of class attributes for every entity (row) in data
 * numDistClasses         int         IN, number of distinct classes for the instances in matrix "data"
 * indexedInfoGainSorted  idx_double* OUT, array of attribute indices and information gains, sorted descendingly w.r.t. information gain
 * indexedGainRatioSorted idx_double* OUT, array of attribute indices and gain ratios, sorted descendingly w.r.t. gain ratio
 */
int infoGain_openmp(double * data, int m, int n, double * classColumn, int numDistClasses,
        idx_double * indexedInfoGainSorted, idx_double * indexedGainRatioSorted) {

    // check number of supported cores
    int numProcs = omp_get_num_procs();

    int i;                 // index variable used in for-loops
    int numInstances = m;  // number of instances in matrix data
    int numAttributes = n; // number of attributes in matrix data
    double H = 0.0;        // entropy

    double * classvalueCount = (double*) malloc(sizeof(double) * numDistClasses * 2);
    // one scratch block of size 2*m per thread
    double * attributevalueCount = (double*) malloc(sizeof(double) * m * 2 * numProcs);

    // get Entropy of the classification
    H = getEntropy(classColumn, m, 1, classvalueCount, numDistClasses);

    double * timing = (double*) malloc(sizeof(double) * numAttributes); // timing in seconds

    double splitinfo = 0.0;
    struct timeval start, end;

    #pragma omp parallel num_threads(numProcs)
    {
        #pragma omp for private(i, start, end, splitinfo)
        for (i = 0; i < numAttributes; ++i) {
            int whoami = omp_get_thread_num();
            gettimeofday(&start, 0);

            indexedInfoGainSorted[i].val = getInfoGainForAttribute(data + i * m, classColumn,
                    numDistClasses, classvalueCount, numInstances, H);
            indexedInfoGainSorted[i].idx = i;

            // getEntropy(attributeColumn, noClassification, null) --- null since classvalueCount is not needed
            // each thread works in its own disjoint 2*m block of attributevalueCount
            splitinfo = getEntropy(data + i * m, m, 0, attributevalueCount + whoami * 2 * m, m);
            if (splitinfo != 0.0) {
                indexedGainRatioSorted[i].val = indexedInfoGainSorted[i].val / splitinfo;
            } else {
                indexedGainRatioSorted[i].val = 0.0;
            }
            indexedGainRatioSorted[i].idx = i;

            gettimeofday(&end, 0);
            // save timing in seconds
            timing[i] = (double) ( ( 1.0 * end.tv_sec - start.tv_sec) + 1E-6 * (1.0 * (end.tv_usec - start.tv_usec)) );
        }
    }

    // sort infoGain and GainRatio descendingly, keeping the original attribute indices
    qsort((void*) indexedInfoGainSorted, numAttributes, sizeof(idx_double), cmpidx_double);
    qsort((void*) indexedGainRatioSorted, numAttributes, sizeof(idx_double), cmpidx_double);

    // free allocated memory
free(classvalueCount); free(attributevalueCount); free(timing); // return 0 if everything worked fine, else return error code return 0; }
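// Hypothetical driver (not part of the original file) showing the calling
// convention: data is stored column-wise (one attribute per column of m
// instances), classColumn holds one class id per instance, and both output
// arrays must have room for n idx_double entries (the .val/.idx fields come
// from cpmidx_double.h, as used above). All values below are invented.
#include <cstdio>
#include <vector>

int example_infogain() {
    int m = 4, n = 2, numDistClasses = 2;
    double data[] = {0, 0, 1, 1,    // attribute 0, one value per instance
                     1, 0, 1, 0};   // attribute 1
    double classColumn[] = {0, 0, 1, 1};
    std::vector<idx_double> ig(n), gr(n);
    int rc = infoGain_openmp(data, m, n, classColumn, numDistClasses,
                             ig.data(), gr.data());
    if (rc == 0) {
        // ig[0] now holds the attribute with the highest information gain
        std::printf("best attribute: %d (gain %f)\n", (int) ig[0].idx, ig[0].val);
    }
    return rc;
}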
GB_binop__bset_int16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__bset_int16
// A.*B function (eWiseMult):       GB_AemultB__bset_int16
// A*D function (colscale):         (none)
// D*A function (rowscale):         (none)
// C+=B function (dense accum):     GB_Cdense_accumB__bset_int16
// C+=b function (dense accum):     GB_Cdense_accumb__bset_int16
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__bset_int16
// C=scalar+B                       GB_bind1st__bset_int16
// C=scalar+B'                      GB_bind1st_tran__bset_int16
// C=A+scalar                       GB_bind2nd__bset_int16
// C=A'+scalar                      GB_bind2nd_tran__bset_int16

// C type:   int16_t
// A type:   int16_t
// B,b type: int16_t
// BinaryOp: cij = GB_BITSET (aij, bij, int16_t, 16)

#define GB_ATYPE \
    int16_t

#define GB_BTYPE \
    int16_t

#define GB_CTYPE \
    int16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int16_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = GB_BITSET (x, y, int16_t, 16) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BSET || GxB_NO_INT16 || GxB_NO_BSET_INT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__bset_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__bset_int16
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__bset_int16
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

#undef  GB_FREE_ALL
#define GB_FREE_ALL                                                     \
{                                                                       \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;  \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;  \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;  \
}

GrB_Info GB_AaddB__bset_int16
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT
C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__bset_int16 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__bset_int16 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = Bx [p] ; Cx [p] = GB_BITSET (x, bij, int16_t, 16) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__bset_int16 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = Ax [p] ; Cx [p] = GB_BITSET (aij, y, int16_t, 16) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = GB_BITSET (x, aij, int16_t, 16) ; \ } GrB_Info 
GB_bind1st_tran__bset_int16 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = GB_BITSET (aij, y, int16_t, 16) ; \ } GrB_Info GB_bind2nd_tran__bset_int16 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
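// Standalone sketch (illustrative, not library code) of the bitmap guard used
// by the 2021-era kernels above: GBB(Ab,p) reads as "entry p is present",
// where a NULL bitmap means the matrix is full and Ab[p] != 0 marks a live
// entry. The bit operation shown is one plausible reading of GB_BITSET; the
// real macro is defined in GB.h and may handle edge cases differently.
#include <cstdint>

static inline bool entry_present(const int8_t *Ab, int64_t p) {
    return (Ab == nullptr) || (Ab[p] != 0);  // full matrix has no bitmap
}

static void bind2nd_bitmap_sketch(int16_t *Cx, const int16_t *Ax,
                                  const int8_t *Ab, int16_t y, int64_t anz) {
    int64_t p;
    #pragma omp parallel for schedule(static)
    for (p = 0; p < anz; p++) {
        if (!entry_present(Ab, p)) continue;          // skip holes in the bitmap
        Cx[p] = (int16_t) (Ax[p] | (1 << (y & 15)));  // assumed bitset semantics
    }
}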
serial_tree_learner.h
/*!
 * Copyright (c) 2016 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 */
#ifndef LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
#define LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_

#include <LightGBM/dataset.h>
#include <LightGBM/tree.h>
#include <LightGBM/tree_learner.h>
#include <LightGBM/utils/array_args.h>
#include <LightGBM/utils/random.h>

#include <string>
#include <cmath>
#include <cstdio>
#include <memory>
#include <random>
#include <vector>

#include "data_partition.hpp"
#include "feature_histogram.hpp"
#include "leaf_splits.hpp"
#include "monotone_constraints.hpp"
#include "split_info.hpp"

#ifdef USE_GPU
// Use 4KBytes aligned allocator for ordered gradients and ordered hessians when GPU is enabled.
// This is necessary to pin the two arrays in memory and make transferring faster.
#include <boost/align/aligned_allocator.hpp>
#endif

using namespace json11;

namespace LightGBM {

/*! \brief forward declaration */
class CostEfficientGradientBoosting;

/*!
 * \brief Used for learning a tree by a single machine
 */
class SerialTreeLearner: public TreeLearner {
 public:
  friend CostEfficientGradientBoosting;
  explicit SerialTreeLearner(const Config* config);

  ~SerialTreeLearner();

  void Init(const Dataset* train_data, bool is_constant_hessian) override;

  void ResetTrainingData(const Dataset* train_data) override;

  void ResetConfig(const Config* config) override;

  Tree* Train(const score_t* gradients, const score_t *hessians, bool is_constant_hessian,
              const Json& forced_split_json) override;

  Tree* FitByExistingTree(const Tree* old_tree, const score_t* gradients, const score_t* hessians) const override;

  Tree* FitByExistingTree(const Tree* old_tree, const std::vector<int>& leaf_pred,
                          const score_t* gradients, const score_t* hessians) override;

  void SetBaggingData(const data_size_t* used_indices, data_size_t num_data) override {
    data_partition_->SetUsedDataIndices(used_indices, num_data);
  }

  void AddPredictionToScore(const Tree* tree, double* out_score) const override {
    if (tree->num_leaves() <= 1) { return; }
    CHECK(tree->num_leaves() <= data_partition_->num_leaves());
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < tree->num_leaves(); ++i) {
      double output = static_cast<double>(tree->LeafOutput(i));
      data_size_t cnt_leaf_data = 0;
      auto tmp_idx = data_partition_->GetIndexOnLeaf(i, &cnt_leaf_data);
      for (data_size_t j = 0; j < cnt_leaf_data; ++j) {
        out_score[tmp_idx[j]] += output;
      }
    }
  }

  void RenewTreeOutput(Tree* tree, const ObjectiveFunction* obj, std::function<double(const label_t*, int)> residual_getter,
                       data_size_t total_num_data, const data_size_t* bag_indices, data_size_t bag_cnt) const override;

  bool IsHistColWise() const override { return is_hist_colwise_; }

 protected:
  void ComputeBestSplitForFeature(FeatureHistogram* histogram_array_, int feature_index, int real_fidx,
                                  bool is_feature_used, int num_data, const LeafSplits* leaf_splits, SplitInfo* best_split);

  void GetMultiValBin(const Dataset* dataset, bool is_first_time);

  virtual std::vector<int8_t> GetUsedFeatures(bool is_tree_level);

  /*!
   * \brief Some initial work before training
   */
  virtual void BeforeTrain();

  /*!
   * \brief Some initial work before FindBestSplit
   */
  virtual bool BeforeFindBestSplit(const Tree* tree, int left_leaf, int right_leaf);

  virtual void FindBestSplits();

  virtual void ConstructHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract);

  virtual void FindBestSplitsFromHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract);

  /*!
   * \brief Partition tree and data according to the best split.
   * \param tree Current tree, will be split by this function.
   * \param best_leaf The index of the leaf that will be split.
   * \param left_leaf The index of the left leaf after the split.
   * \param right_leaf The index of the right leaf after the split.
   */
  virtual void Split(Tree* tree, int best_leaf, int* left_leaf, int* right_leaf);

  /* Force splits with the forced_split_json dict and then return the number of splits forced. */
  virtual int32_t ForceSplits(Tree* tree, const Json& forced_split_json, int* left_leaf, int* right_leaf,
                              int* cur_depth, bool *aborted_last_force_split);

  /*!
   * \brief Get the number of data in a leaf
   * \param leaf_idx The index of the leaf
   * \return The number of data in the leaf_idx leaf
   */
  inline virtual data_size_t GetGlobalDataCountInLeaf(int leaf_idx) const;

  /*! \brief number of data */
  data_size_t num_data_;
  /*! \brief number of features */
  int num_features_;
  /*! \brief training data */
  const Dataset* train_data_;
  /*! \brief gradients of current iteration */
  const score_t* gradients_;
  /*! \brief hessians of current iteration */
  const score_t* hessians_;
  /*! \brief training data partition on leaves */
  std::unique_ptr<DataPartition> data_partition_;
  /*! \brief used to generate the set of used features */
  Random random_;
  /*! \brief used for sub-feature training; is_feature_used_[i] = false means feature i is not used */
  std::vector<int8_t> is_feature_used_;
  /*! \brief used feature indices in current tree */
  std::vector<int> used_feature_indices_;
  /*! \brief pointer to histograms array of parent of current leaves */
  FeatureHistogram* parent_leaf_histogram_array_;
  /*! \brief pointer to histograms array of smaller leaf */
  FeatureHistogram* smaller_leaf_histogram_array_;
  /*! \brief pointer to histograms array of larger leaf */
  FeatureHistogram* larger_leaf_histogram_array_;
  /*! \brief store best split points for all leaves */
  std::vector<SplitInfo> best_split_per_leaf_;
  /*! \brief store best split per feature for all leaves */
  std::vector<SplitInfo> splits_per_leaf_;
  /*! \brief stores minimum and maximum constraints for each leaf */
  std::unique_ptr<LeafConstraints<ConstraintEntry>> constraints_;
  /*! \brief stores best thresholds for all features for the smaller leaf */
  std::unique_ptr<LeafSplits> smaller_leaf_splits_;
  /*! \brief stores best thresholds for all features for the larger leaf */
  std::unique_ptr<LeafSplits> larger_leaf_splits_;
  std::vector<int> valid_feature_indices_;
#ifdef USE_GPU
  /*! \brief gradients of current iteration, ordered for cache efficiency, aligned to 4K page */
  std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache efficiency, aligned to 4K page */
  std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_hessians_;
#else
  /*! \brief gradients of current iteration, ordered for cache efficiency */
  std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache efficiency */
  std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> ordered_hessians_;
#endif

  /*!
   * \brief is_data_in_leaf_[i] != 0 means the i-th data point is marked */
  std::vector<char, Common::AlignmentAllocator<char, kAlignedSize>> is_data_in_leaf_;
  /*! \brief used to cache historical histograms to speed up training */
  HistogramPool histogram_pool_;
  /*! \brief config of tree learner */
  const Config* config_;
  int num_threads_;
  std::vector<int> ordered_bin_indices_;
  bool is_constant_hessian_;
  std::unique_ptr<MultiValBin> multi_val_bin_;
  bool is_hist_colwise_;
  std::unique_ptr<CostEfficientGradientBoosting> cegb_;
};

inline data_size_t SerialTreeLearner::GetGlobalDataCountInLeaf(int leaf_idx) const {
  if (leaf_idx >= 0) {
    return data_partition_->leaf_count(leaf_idx);
  } else {
    return 0;
  }
}

}  // namespace LightGBM
#endif   // LightGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
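// Standalone sketch (illustrative, not LightGBM code) of the pattern used by
// AddPredictionToScore above: parallelize over leaves, then walk each leaf's
// index list serially. Leaves partition the data, so no two threads write the
// same score slot and no atomics are needed. All names here are invented.
#include <vector>

void add_leaf_outputs(const std::vector<std::vector<int>> &leaf_indices,
                      const std::vector<double> &leaf_output,
                      std::vector<double> *out_score) {
    int num_leaves = (int) leaf_indices.size();
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < num_leaves; ++i) {
        for (int idx : leaf_indices[i]) {
            (*out_score)[idx] += leaf_output[i];  // disjoint rows per leaf
        }
    }
}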
multiplication.h
#ifndef MULTIPLICATION_H #define MULTIPLICATION_H #include <omp.h> #include <sys/time.h> #include "matrix.h" ull mstandard(ull ar, ull ac, // Matrix A rows and cols ull br, ull bc, // Matrix B rows and cols ul threads) // Number of threads { timeval start, end; matrix* A = alloc(ar, ac), * B = alloc(br, bc), * C = alloc(ar, bc); // Result fill(A); fill(B); gettimeofday(&start, NULL); /** * Simple method */ #pragma omp parallel shared(C) num_threads(threads) { ull i = 0, j = 0, k = 0; #pragma omp for private(i, j, k) iterate(, i, A->rows) { iterate(, j, B->cols) { T dot = 0; // Store multiplication result iterate(, k, A->cols) { dot += A(i, k) * B(k, j); } C(i, j) = dot; } } } gettimeofday(&end, NULL); #ifdef WRITE write(C, "product.txt"); printf("\tResult matrix is written to `product.txt`\n"); #endif dealloc(A); dealloc(B); dealloc(C); return ELAPSED; } ull mblocks(ull ar, ull ac, ull br, ull bc, ul threads) { timeval start, end; matrix* A = alloc(ar, ac), * B = alloc(br, bc), * C = alloc(ar, bc); fill(A); fill(B); gettimeofday(&start, NULL); /** * Block method */ #pragma omp parallel shared(C) num_threads(threads) { ull lt = threads, iv = 0, ih = 0; #pragma omp for schedule(static) collapse(2) iterate(, iv, lt) { // Vertical block index iterate(, ih, lt) { // Horizontal block index for (ull i = iv * A->rows / lt; i < (iv + 1) * A->rows / lt; ++i) { for (ull j = ih * A->cols / lt; j < (ih + 1) * A->cols / lt; ++j) { iterate(ull, k, A->cols) { C(i, j) += A(i, k) * B(k, j); } } } } } } gettimeofday(&end, NULL); #ifdef WRITE write(C, "product.txt"); #endif dealloc(A); dealloc(B); dealloc(C); return ELAPSED; } ull mcheckerboard(ull ar, ull ac, ull br, ull bc, ul threads) { timeval start, end; matrix* A = alloc(ar, ac), * B = alloc(br, bc), * C = alloc(ar, bc); fill(A); fill(B); gettimeofday(&start, NULL); /** * Checkerboard method */ #pragma omp parallel shared(C) { ull bv = threads, // Vertical blocks bh = threads, // Horizontal blocks bk = threads, // K blocks iv = 0, ih = 0, ik = 0; // Indices #pragma omp for collapse(2) iterate(, iv, bv) { iterate(, ih, bh) { iterate(, ik, bk) { for (ull i = iv * A->rows / bv; i < (iv + 1) * A->rows / bv; ++i) { for (ull j = ih * B->cols / bh; j < (ih + 1) * B->cols / bh; ++j) { for (ull k = ik * A->cols / bk; k < (ik + 1) * A->cols / bk; ++k) { #pragma omp atomic C(i, j) += A(i, k) * B(k, j); } } } } } } } gettimeofday(&end, NULL); #ifdef WRITE write(C, "product.txt"); #endif dealloc(A); dealloc(B); dealloc(C); return ELAPSED; } #endif // MULTIPLICATION_H
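// Hypothetical driver (not part of the header): compares the three strategies
// above on square matrices. It assumes each function returns the elapsed time
// computed by the ELAPSED macro from matrix.h, as the code suggests; the
// matrix size and thread count are invented.
#include <cstdio>

int main() {
    const ull n = 512;
    const ul threads = 4;
    std::printf("standard:     %llu\n", (unsigned long long) mstandard(n, n, n, n, threads));
    std::printf("blocks:       %llu\n", (unsigned long long) mblocks(n, n, n, n, threads));
    std::printf("checkerboard: %llu\n", (unsigned long long) mcheckerboard(n, n, n, n, threads));
    return 0;
}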
wand-view.c
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%                        W   W   AAA   N   N  DDDD                            %
%                        W   W  A   A  NN  N  D   D                           %
%                        W W W  AAAAA  N N N  D   D                           %
%                        WW WW  A   A  N  NN  D   D                           %
%                        W   W  A   A  N   N  DDDD                            %
%                                                                             %
%                        V   V  IIIII  EEEEE  W   W                           %
%                        V   V    I    E      W   W                           %
%                        V   V    I    EEE    W W W                           %
%                         V V     I    E      WW WW                           %
%                          V    IIIII  EEEEE  W   W                           %
%                                                                             %
%                                                                             %
%                        MagickWand Wand View Methods                         %
%                                                                             %
%                              Software Design                                %
%                                   Cristy                                    %
%                                 March 2003                                  %
%                                                                             %
%                                                                             %
%  Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization      %
%  dedicated to making software imaging solutions freely available.           %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may  %
%  obtain a copy of the License at                                            %
%                                                                             %
%    https://imagemagick.org/script/license.php                               %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software        %
%  distributed under the License is distributed on an "AS IS" BASIS,          %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   %
%  See the License for the specific language governing permissions and        %
%  limitations under the License.                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/

/*
  Include declarations.
*/
#include "wand/studio.h"
#include "wand/MagickWand.h"
#include "wand/magick-wand-private.h"
#include "wand/wand.h"
#include "magick/monitor-private.h"
#include "magick/thread-private.h"

/*
  Define declarations.
*/
#define WandViewId  "WandView"

/*
  Typedef declarations.
*/
struct _WandView
{
  size_t
    id;

  char
    name[MaxTextExtent],
    *description;

  RectangleInfo
    extent;

  MagickWand
    *wand;

  CacheView
    *view;

  size_t
    number_threads;

  PixelWand
    ***pixel_wands;

  ExceptionInfo
    *exception;

  MagickBooleanType
    debug;

  size_t
    signature;
};

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l o n e W a n d V i e w                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneWandView() makes a copy of the specified wand view.
%
%  The format of the CloneWandView method is:
%
%      WandView *CloneWandView(const WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/
WandExport WandView *CloneWandView(const WandView *wand_view)
{
  WandView *clone_view;
  register ssize_t i;

  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  if (wand_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name);
  clone_view=(WandView *) AcquireMagickMemory(sizeof(*clone_view));
  if (clone_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  (void) memset(clone_view,0,sizeof(*clone_view));
  clone_view->id=AcquireWandId();
  (void) FormatLocaleString(clone_view->name,MaxTextExtent,"%s-%.20g",
    WandViewId,(double) clone_view->id);
  clone_view->description=ConstantString(wand_view->description);
  clone_view->view=CloneCacheView(wand_view->view);
  clone_view->extent=wand_view->extent;
  clone_view->number_threads=wand_view->number_threads;
  clone_view->exception=AcquireExceptionInfo();
  InheritException(clone_view->exception,wand_view->exception);
  /* allocate the per-thread wand array before cloning into it */
  clone_view->pixel_wands=(PixelWand ***) AcquireQuantumMemory(
    wand_view->number_threads,sizeof(*clone_view->pixel_wands));
  if (clone_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  for (i=0; i < (ssize_t) wand_view->number_threads; i++)
    clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **)
      wand_view->pixel_wands[i],wand_view->extent.width);
  clone_view->debug=wand_view->debug;
  if (clone_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name);
  clone_view->signature=WandSignature;
  return(clone_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y W a n d V i e w                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyWandView() deallocates memory associated with a wand view.
%
%  The format of the DestroyWandView method is:
%
%      WandView *DestroyWandView(WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/

static PixelWand ***DestroyPixelsThreadSet(PixelWand ***pixel_wands,
  const size_t number_wands,const size_t number_threads)
{
  register ssize_t i;

  assert(pixel_wands != (PixelWand ***) NULL);
  for (i=0; i < (ssize_t) number_threads; i++)
    if (pixel_wands[i] != (PixelWand **) NULL)
      pixel_wands[i]=DestroyPixelWands(pixel_wands[i],number_wands);
  pixel_wands=(PixelWand ***) RelinquishMagickMemory(pixel_wands);
  return(pixel_wands);
}

WandExport WandView *DestroyWandView(WandView *wand_view)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  wand_view->pixel_wands=DestroyPixelsThreadSet(wand_view->pixel_wands,
    wand_view->extent.width,wand_view->number_threads);
  wand_view->view=DestroyCacheView(wand_view->view);
  wand_view->exception=DestroyExceptionInfo(wand_view->exception);
  wand_view->signature=(~WandSignature);
  RelinquishWandId(wand_view->id);
  wand_view=(WandView *) RelinquishMagickMemory(wand_view);
  return(wand_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D u p l e x T r a n s f e r W a n d V i e w I t e r a t o r               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DuplexTransferWandViewIterator() iterates over three wand views in
%  parallel and calls your transfer method for each scanline of the view.  The
%  source and duplex pixel extent is not confined to the image canvas-- that is
%  you can include negative offsets or widths or heights that exceed the image
%  dimension.  However, the destination wand view is confined to the image
%  canvas-- that is no negative offsets or widths or heights that exceed the
%  image dimension are permitted.
%
%  The callback signature is:
%
%      MagickBooleanType DuplexTransferImageViewMethod(const WandView *source,
%        const WandView *duplex,WandView *destination,const ssize_t y,
%        const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback transfer method that must be
%  executed by a single thread at a time.
%
%  The format of the DuplexTransferWandViewIterator method is:
%
%      MagickBooleanType DuplexTransferWandViewIterator(WandView *source,
%        WandView *duplex,WandView *destination,
%        DuplexTransferWandViewMethod transfer,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source wand view.
%
%    o duplex: the duplex wand view.
%
%    o destination: the destination wand view.
%
%    o transfer: the transfer callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType DuplexTransferWandViewIterator(WandView *source,
  WandView *duplex,WandView *destination,DuplexTransferWandViewMethod transfer,
  void *context)
{
  ExceptionInfo *exception;
  Image *destination_image, *duplex_image, *source_image;
  MagickBooleanType status;
  MagickOffsetType progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t height;
#endif
  ssize_t y;

  assert(source != (WandView *) NULL);
  assert(source->signature == WandSignature);
  if (transfer == (DuplexTransferWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  duplex_image=duplex->wand->images;
  destination_image=destination->wand->images;
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=(size_t) (source->extent.height-source->extent.y);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,destination_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType sync;
    register const IndexPacket *magick_restrict duplex_indexes,
      *magick_restrict indexes;
    register const PixelPacket *magick_restrict duplex_pixels,
      *magick_restrict pixels;
    register IndexPacket *magick_restrict destination_indexes;
    register ssize_t x;
    register PixelPacket *magick_restrict destination_pixels;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    for (x=0; x < (ssize_t) source->extent.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelBlack(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->extent.x,y,
      duplex->extent.width,1,duplex->exception);
    if (duplex_pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    duplex_indexes=GetCacheViewVirtualIndexQueue(duplex->view);
    for (x=0; x < (ssize_t) duplex->extent.width; x++)
      PixelSetQuantumColor(duplex->pixel_wands[id][x],duplex_pixels+x);
    if (duplex_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) duplex->extent.width; x++)
        PixelSetBlackQuantum(duplex->pixel_wands[id][x],
          GetPixelBlack(duplex_indexes+x));
    if (duplex_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) duplex->extent.width; x++)
        PixelSetIndex(duplex->pixel_wands[id][x],
          GetPixelIndex(duplex_indexes+x));
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,exception);
    if (destination_pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    for (x=0; x < (ssize_t) destination->extent.width; x++)
      PixelSetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        PixelSetBlackQuantum(destination->pixel_wands[id][x],
          GetPixelBlack(destination_indexes+x));
    if (destination_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        PixelSetIndex(destination->pixel_wands[id][x],
          GetPixelIndex(destination_indexes+x));
    if (transfer(source,duplex,destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    for (x=0; x < (ssize_t) destination->extent.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        SetPixelBlack(destination_indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        InheritException(destination->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType proceed;

        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
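/*
  A sketch of a duplex transfer callback for the iterator above -- not part
  of the original source.  It averages each destination pixel's red channel
  from the source and duplex views, and uses the `#pragma omp critical`
  section recommended above only around the (hypothetical) shared scanline
  counter passed in through `context`; the per-thread pixel wands need no
  locking.  The WAND_VIEW_EXAMPLES guard is hypothetical.
*/
#if defined(WAND_VIEW_EXAMPLES)
static MagickBooleanType BlendScanlines(const WandView *source,
  const WandView *duplex,WandView *destination,const ssize_t y,
  const int thread_id,void *context)
{
  PixelWand **destination_pixels, **duplex_pixels, **source_pixels;
  register ssize_t x;
  size_t *scanlines_done = (size_t *) context;  /* hypothetical shared counter */

  source_pixels=GetWandViewPixels(source);
  duplex_pixels=GetWandViewPixels(duplex);
  destination_pixels=GetWandViewPixels(destination);
  for (x=0; x < (ssize_t) GetWandViewExtent(destination).width; x++)
    PixelSetRed(destination_pixels[x],0.5*(PixelGetRed(source_pixels[x])+
      PixelGetRed(duplex_pixels[x])));
  #pragma omp critical (blend_scanlines)
  (*scanlines_done)++;
  (void) y;
  (void) thread_id;
  return(MagickTrue);
}
#endif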
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t W a n d V i e w E x c e p t i o n                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetWandViewException() returns the severity, reason, and description of any
%  error that occurs when utilizing a wand view.
%
%  The format of the GetWandViewException method is:
%
%      char *GetWandViewException(const WandView *wand_view,
%        ExceptionType *severity)
%
%  A description of each parameter follows:
%
%    o wand_view: the pixel wand_view.
%
%    o severity: the severity of the error is returned here.
%
*/
WandExport char *GetWandViewException(const WandView *wand_view,
  ExceptionType *severity)
{
  char *description;

  assert(wand_view != (const WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  if (wand_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name);
  assert(severity != (ExceptionType *) NULL);
  *severity=wand_view->exception->severity;
  description=(char *) AcquireQuantumMemory(2UL*MaxTextExtent,
    sizeof(*description));
  if (description == (char *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  *description='\0';
  if (wand_view->exception->reason != (char *) NULL)
    (void) CopyMagickString(description,GetLocaleExceptionMessage(
      wand_view->exception->severity,wand_view->exception->reason),
      MaxTextExtent);
  if (wand_view->exception->description != (char *) NULL)
    {
      (void) ConcatenateMagickString(description," (",MaxTextExtent);
      (void) ConcatenateMagickString(description,GetLocaleExceptionMessage(
        wand_view->exception->severity,wand_view->exception->description),
        MaxTextExtent);
      (void) ConcatenateMagickString(description,")",MaxTextExtent);
    }
  return(description);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t W a n d V i e w E x t e n t                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetWandViewExtent() returns the wand view extent.
%
%  The format of the GetWandViewExtent method is:
%
%      RectangleInfo GetWandViewExtent(const WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/
WandExport RectangleInfo GetWandViewExtent(const WandView *wand_view)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  return(wand_view->extent);
}
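/*
  A sketch of surfacing an iterator failure -- not part of the original
  source.  `ReportViewError` is a hypothetical helper; the description
  buffer comes from GetWandViewException() above and is released with
  RelinquishMagickMemory(), matching how it was acquired.  The
  WAND_VIEW_EXAMPLES guard is hypothetical.
*/
#if defined(WAND_VIEW_EXAMPLES)
static void ReportViewError(const WandView *wand_view)
{
  char *description;
  ExceptionType severity;

  description=GetWandViewException(wand_view,&severity);
  if (*description != '\0')
    (void) fprintf(stderr,"WandView error (severity %d): %s\n",
      (int) severity,description);
  description=(char *) RelinquishMagickMemory(description);
}
#endif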
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t W a n d V i e w I t e r a t o r                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetWandViewIterator() iterates over the wand view in parallel and calls
%  your get method for each scanline of the view.  The pixel extent is
%  not confined to the image canvas-- that is you can include negative offsets
%  or widths or heights that exceed the image dimension.  Any updates to
%  the pixels in your callback are ignored.
%
%  The callback signature is:
%
%      MagickBooleanType GetImageViewMethod(const WandView *source,
%        const ssize_t y,const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback get method that must be
%  executed by a single thread at a time.
%
%  The format of the GetWandViewIterator method is:
%
%      MagickBooleanType GetWandViewIterator(WandView *source,
%        GetWandViewMethod get,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source wand view.
%
%    o get: the get callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType GetWandViewIterator(WandView *source,
  GetWandViewMethod get,void *context)
{
  Image *source_image;
  MagickBooleanType status;
  MagickOffsetType progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t height;
#endif
  ssize_t y;

  assert(source != (WandView *) NULL);
  assert(source->signature == WandSignature);
  if (get == (GetWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=(size_t) (source->extent.height-source->extent.y);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const IndexPacket *indexes;
    register const PixelPacket *pixels;
    register ssize_t x;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    for (x=0; x < (ssize_t) source->extent.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelBlack(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (get(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType proceed;

        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t W a n d V i e w P i x e l s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetWandViewPixels() returns the wand view pixel_wands.
%
%  The format of the GetWandViewPixels method is:
%
%      PixelWand *GetWandViewPixels(const WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/
WandExport PixelWand **GetWandViewPixels(const WandView *wand_view)
{
  const int
    id = GetOpenMPThreadId();

  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  return(wand_view->pixel_wands[id]);
}
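/*
  A sketch of a read-only `get` callback for GetWandViewIterator() -- not
  part of the original source.  The running totals are shared across the
  thread team, so they are updated inside the `#pragma omp critical` section
  described above; the `MeanContext` type and WAND_VIEW_EXAMPLES guard are
  hypothetical.
*/
#if defined(WAND_VIEW_EXAMPLES)
typedef struct
{
  double sum;
  size_t count;
} MeanContext;

static MagickBooleanType SumRedChannel(const WandView *source,const ssize_t y,
  const int thread_id,void *context)
{
  double row_sum = 0.0;
  MeanContext *mean = (MeanContext *) context;
  PixelWand **pixels;
  register ssize_t x;

  pixels=GetWandViewPixels(source);
  for (x=0; x < (ssize_t) GetWandViewExtent(source).width; x++)
    row_sum+=PixelGetRed(pixels[x]);      /* thread-private accumulation */
  #pragma omp critical (sum_red_channel)
  {
    mean->sum+=row_sum;                   /* shared state: one thread at a time */
    mean->count+=(size_t) GetWandViewExtent(source).width;
  }
  (void) y;
  (void) thread_id;
  return(MagickTrue);
}
#endif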
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t W a n d V i e w W a n d                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetWandViewWand() returns the magick wand associated with the wand view.
%
%  The format of the GetWandViewWand method is:
%
%      MagickWand *GetWandViewWand(const WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/
WandExport MagickWand *GetWandViewWand(const WandView *wand_view)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  return(wand_view->wand);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s W a n d V i e w                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsWandView() returns MagickTrue if the parameter is verified as a wand
%  view object.
%
%  The format of the IsWandView method is:
%
%      MagickBooleanType IsWandView(const WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/
WandExport MagickBooleanType IsWandView(const WandView *wand_view)
{
  size_t length;

  if (wand_view == (const WandView *) NULL)
    return(MagickFalse);
  if (wand_view->signature != WandSignature)
    return(MagickFalse);
  length=strlen(WandViewId);
  if (LocaleNCompare(wand_view->name,WandViewId,length) != 0)
    return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   N e w W a n d V i e w                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewWandView() returns a wand view required for all other methods in the
%  Wand View API.
%
%  The format of the NewWandView method is:
%
%      WandView *NewWandView(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the wand.
%
*/

static PixelWand ***AcquirePixelsThreadSet(const size_t number_wands,
  const size_t number_threads)
{
  PixelWand ***pixel_wands;
  register ssize_t i;

  pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_wands));
  if (pixel_wands == (PixelWand ***) NULL)
    return((PixelWand ***) NULL);
  (void) memset(pixel_wands,0,number_threads*sizeof(*pixel_wands));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixel_wands[i]=NewPixelWands(number_wands);
    if (pixel_wands[i] == (PixelWand **) NULL)
      return(DestroyPixelsThreadSet(pixel_wands,number_wands,number_threads));
  }
  return(pixel_wands);
}

WandExport WandView *NewWandView(MagickWand *wand)
{
  WandView *wand_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  wand_view=(WandView *) AcquireMagickMemory(sizeof(*wand_view));
  if (wand_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) memset(wand_view,0,sizeof(*wand_view));
  wand_view->id=AcquireWandId();
  (void) FormatLocaleString(wand_view->name,MaxTextExtent,"%s-%.20g",
    WandViewId,(double) wand_view->id);
  wand_view->description=ConstantString("WandView");
  wand_view->wand=wand;
  wand_view->exception=AcquireExceptionInfo();
  wand_view->view=AcquireVirtualCacheView(wand_view->wand->images,
    wand_view->exception);
  wand_view->extent.width=wand->images->columns;
  wand_view->extent.height=wand->images->rows;
  wand_view->number_threads=GetOpenMPMaximumThreads();
  wand_view->pixel_wands=AcquirePixelsThreadSet(wand_view->extent.width,
    wand_view->number_threads);
  if (wand_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  wand_view->debug=IsEventLogging();
  wand_view->signature=WandSignature;
  return(wand_view);
}
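/*
  A sketch of the create/verify/destroy life cycle -- not part of the
  original source.  The `image.png` path is only an illustration, and the
  WAND_VIEW_EXAMPLES guard is hypothetical.
*/
#if defined(WAND_VIEW_EXAMPLES)
static MagickBooleanType CheckedViewLifeCycle(void)
{
  MagickBooleanType status;
  MagickWand *wand;
  WandView *wand_view;

  wand=NewMagickWand();
  status=MagickReadImage(wand,"image.png");
  if (status != MagickFalse)
    {
      wand_view=NewWandView(wand);
      status=IsWandView(wand_view);
      if (wand_view != (WandView *) NULL)
        wand_view=DestroyWandView(wand_view);
    }
  wand=DestroyMagickWand(wand);
  return(status);
}
#endif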
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   N e w W a n d V i e w E x t e n t                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewWandViewExtent() returns a wand view required for all other methods
%  in the Wand View API.
%
%  The format of the NewWandViewExtent method is:
%
%      WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x,
%        const ssize_t y,const size_t width,const size_t height)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o x,y,columns,rows:  These values define the perimeter of an extent of
%      pixel_wands view.
%
*/
WandExport WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height)
{
  WandView *wand_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  wand_view=(WandView *) AcquireMagickMemory(sizeof(*wand_view));
  if (wand_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) memset(wand_view,0,sizeof(*wand_view));
  wand_view->id=AcquireWandId();
  (void) FormatLocaleString(wand_view->name,MaxTextExtent,"%s-%.20g",
    WandViewId,(double) wand_view->id);
  wand_view->description=ConstantString("WandView");
  /* set the wand before acquiring a view of its images */
  wand_view->wand=wand;
  wand_view->exception=AcquireExceptionInfo();
  wand_view->view=AcquireVirtualCacheView(wand_view->wand->images,
    wand_view->exception);
  wand_view->extent.width=width;
  wand_view->extent.height=height;
  wand_view->extent.x=x;
  wand_view->extent.y=y;
  wand_view->number_threads=GetOpenMPMaximumThreads();
  wand_view->pixel_wands=AcquirePixelsThreadSet(wand_view->extent.width,
    wand_view->number_threads);
  if (wand_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  wand_view->debug=IsEventLogging();
  wand_view->signature=WandSignature;
  return(wand_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t W a n d V i e w D e s c r i p t i o n                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetWandViewDescription() associates a description with an image view.
%
%  The format of the SetWandViewDescription method is:
%
%      void SetWandViewDescription(WandView *image_view,const char *description)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
%    o description: the wand view description.
%
*/
MagickExport void SetWandViewDescription(WandView *wand_view,
  const char *description)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  wand_view->description=ConstantString(description);
}
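/*
  A sketch of creating a region-limited view -- not part of the original
  source.  It assumes the wand already holds an image; the helper name and
  the WAND_VIEW_EXAMPLES guard are hypothetical.
*/
#if defined(WAND_VIEW_EXAMPLES)
static WandView *OpenThumbnailView(MagickWand *wand)
{
  WandView *wand_view;

  wand_view=NewWandViewExtent(wand,0,0,128,128);  /* top-left 128x128 region */
  if (wand_view == (WandView *) NULL)
    return((WandView *) NULL);
  SetWandViewDescription(wand_view,"thumbnail region");
  return(wand_view);
}
#endif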
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t W a n d V i e w I t e r a t o r                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetWandViewIterator() iterates over the wand view in parallel and calls
%  your set method for each scanline of the view.  The pixel extent is
%  confined to the image canvas-- that is no negative offsets or widths or
%  heights that exceed the image dimension.  The pixels are initially
%  undefined and any settings you make in the callback method are automagically
%  synced back to your image.
%
%  The callback signature is:
%
%      MagickBooleanType SetImageViewMethod(ImageView *destination,
%        const ssize_t y,const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback set method that must be
%  executed by a single thread at a time.
%
%  The format of the SetWandViewIterator method is:
%
%      MagickBooleanType SetWandViewIterator(WandView *destination,
%        SetWandViewMethod set,void *context)
%
%  A description of each parameter follows:
%
%    o destination: the wand view.
%
%    o set: the set callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType SetWandViewIterator(WandView *destination,
  SetWandViewMethod set,void *context)
{
  ExceptionInfo *exception;
  Image *destination_image;
  MagickBooleanType status;
  MagickOffsetType progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t height;
#endif
  ssize_t y;

  assert(destination != (WandView *) NULL);
  assert(destination->signature == WandSignature);
  if (set == (SetWandViewMethod) NULL)
    return(MagickFalse);
  destination_image=destination->wand->images;
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=(size_t) (destination->extent.height-destination->extent.y);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(destination_image,destination_image,height,1)
#endif
  for (y=destination->extent.y; y < (ssize_t) destination->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType sync;
    register IndexPacket *magick_restrict indexes;
    register ssize_t x;
    register PixelPacket *magick_restrict pixels;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(destination->view,destination->extent.x,
      y,destination->extent.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    if (set(destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    for (x=0; x < (ssize_t) destination->extent.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        SetPixelBlack(indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType proceed;

        proceed=SetImageProgress(destination_image,destination->description,
          progress++,destination->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
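/*
  A sketch of a `set` callback for SetWandViewIterator() -- not part of the
  original source.  It writes a horizontal gray ramp; since it only touches
  this thread's pixel wands, no critical section is needed.  The
  WAND_VIEW_EXAMPLES guard is hypothetical.
*/
#if defined(WAND_VIEW_EXAMPLES)
static MagickBooleanType SetGrayRamp(WandView *destination,const ssize_t y,
  const int thread_id,void *context)
{
  double gray;
  PixelWand **pixels;
  register ssize_t x;
  size_t width;

  pixels=GetWandViewPixels(destination);
  width=GetWandViewExtent(destination).width;
  for (x=0; x < (ssize_t) width; x++)
  {
    gray=(width <= 1) ? 0.0 : (double) x/((double) width-1.0);
    PixelSetRed(pixels[x],gray);
    PixelSetGreen(pixels[x],gray);
    PixelSetBlue(pixels[x],gray);
  }
  (void) y;
  (void) thread_id;
  (void) context;
  return(MagickTrue);
}
#endif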
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t W a n d V i e w T h r e a d s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetWandViewThreads() sets the number of threads in a thread team.
%
%  The format of the SetWandViewThreads method is:
%
%      void SetWandViewThreads(WandView *image_view,
%        const size_t number_threads)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
%    o number_threads: the number of threads in a thread team.
%
*/
MagickExport void SetWandViewThreads(WandView *image_view,
  const size_t number_threads)
{
  assert(image_view != (WandView *) NULL);
  assert(image_view->signature == WandSignature);
  image_view->number_threads=number_threads;
  if (number_threads > (size_t) GetMagickResourceLimit(ThreadResource))
    image_view->number_threads=GetOpenMPMaximumThreads();
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s f e r W a n d V i e w I t e r a t o r                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransferWandViewIterator() iterates over two wand views in parallel and
%  calls your transfer method for each scanline of the view.  The source pixel
%  extent is not confined to the image canvas-- that is you can include
%  negative offsets or widths or heights that exceed the image dimension.
%  However, the destination wand view is confined to the image canvas-- that
%  is no negative offsets or widths or heights that exceed the image dimension
%  are permitted.
%
%  The callback signature is:
%
%      MagickBooleanType TransferImageViewMethod(const WandView *source,
%        WandView *destination,const ssize_t y,const int thread_id,
%        void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback transfer method that must be
%  executed by a single thread at a time.
%
%  The format of the TransferWandViewIterator method is:
%
%      MagickBooleanType TransferWandViewIterator(WandView *source,
%        WandView *destination,TransferWandViewMethod transfer,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source wand view.
%
%    o destination: the destination wand view.
%
%    o transfer: the transfer callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType TransferWandViewIterator(WandView *source,
  WandView *destination,TransferWandViewMethod transfer,void *context)
{
  ExceptionInfo *exception;
  Image *destination_image, *source_image;
  MagickBooleanType status;
  MagickOffsetType progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t height;
#endif
  ssize_t y;

  assert(source != (WandView *) NULL);
  assert(source->signature == WandSignature);
  if (transfer == (TransferWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  destination_image=destination->wand->images;
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=(size_t) (source->extent.height-source->extent.y);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,destination_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType sync;
    register const IndexPacket *magick_restrict indexes;
    register const PixelPacket *magick_restrict pixels;
    register IndexPacket *magick_restrict destination_indexes;
    register ssize_t x;
    register PixelPacket *magick_restrict destination_pixels;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    for (x=0; x < (ssize_t) source->extent.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelBlack(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,exception);
    if (destination_pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    for (x=0; x < (ssize_t) destination->extent.width; x++)
      PixelSetQuantumColor(destination->pixel_wands[id][x],pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        PixelSetBlackQuantum(destination->pixel_wands[id][x],
          GetPixelBlack(indexes+x));
    if (destination_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        PixelSetIndex(destination->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (transfer(source,destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    for (x=0; x < (ssize_t) destination->extent.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        SetPixelBlack(destination_indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        InheritException(destination->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType proceed;

        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   U p d a t e W a n d V i e w I t e r a t o r                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UpdateWandViewIterator() iterates over the wand view in parallel and calls
%  your update method for each scanline of the view.  The pixel extent is
%  confined to the image canvas-- that is no negative offsets or widths or
%  heights that exceed the image dimension are permitted.  Updates to pixels
%  in your callback are automagically synced back to the image.
%
%  The callback signature is:
%
%      MagickBooleanType UpdateImageViewMethod(WandView *source,const ssize_t y,
%        const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback update method that must be
%  executed by a single thread at a time.
%
%  The format of the UpdateWandViewIterator method is:
%
%      MagickBooleanType UpdateWandViewIterator(WandView *source,
%        UpdateWandViewMethod update,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source wand view.
%
%    o update: the update callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType UpdateWandViewIterator(WandView *source,
  UpdateWandViewMethod update,void *context)
{
  ExceptionInfo *exception;
  Image *source_image;
  MagickBooleanType status;
  MagickOffsetType progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t height;
#endif
  ssize_t y;

  assert(source != (WandView *) NULL);
  assert(source->signature == WandSignature);
  if (update == (UpdateWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  if (SetImageStorageClass(source_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=source->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=(size_t) (source->extent.height-source->extent.y);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register IndexPacket *magick_restrict indexes;
    register ssize_t x;
    register PixelPacket *magick_restrict pixels;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y,
      source->extent.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(source->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(source->view);
    for (x=0; x < (ssize_t) source->extent.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelBlack(indexes+x));
    if (update(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    for (x=0; x < (ssize_t) source->extent.width; x++)
      PixelGetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        SetPixelBlack(indexes+x,PixelGetBlackQuantum(
          source->pixel_wands[id][x]));
    if (SyncCacheViewAuthenticPixels(source->view,exception) == MagickFalse)
      {
        InheritException(source->exception,GetCacheViewException(source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType proceed;

        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
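/*
  A sketch of an `update` callback for UpdateWandViewIterator() -- not part
  of the original source.  It brightens every pixel in place; the iterator
  itself syncs the updated wands back to the image.  Reading the shared gain
  is safe without a critical section because no thread writes it.  The
  WAND_VIEW_EXAMPLES guard is hypothetical.
*/
#if defined(WAND_VIEW_EXAMPLES)
static MagickBooleanType BrightenScanline(WandView *source,const ssize_t y,
  const int thread_id,void *context)
{
  double *gain = (double *) context;  /* hypothetical shared, read-only gain */
  PixelWand **pixels;
  register ssize_t x;

  pixels=GetWandViewPixels(source);
  for (x=0; x < (ssize_t) GetWandViewExtent(source).width; x++)
  {
    PixelSetRed(pixels[x],(*gain)*PixelGetRed(pixels[x]));
    PixelSetGreen(pixels[x],(*gain)*PixelGetGreen(pixels[x]));
    PixelSetBlue(pixels[x],(*gain)*PixelGetBlue(pixels[x]));
  }
  (void) y;
  (void) thread_id;
  return(MagickTrue);
}
#endif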
par_rap.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

#include "_hypre_parcsr_ls.h"
#include "_hypre_utilities.h"
#include "hypre_hopscotch_hash.h"

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGBuildCoarseOperator
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGBuildCoarseOperator( hypre_ParCSRMatrix  *RT,
                                    hypre_ParCSRMatrix  *A,
                                    hypre_ParCSRMatrix  *P,
                                    hypre_ParCSRMatrix **RAP_ptr )
{
   hypre_BoomerAMGBuildCoarseOperatorKT( RT, A, P, 0, RAP_ptr);
   return hypre_error_flag;
}

HYPRE_Int
hypre_BoomerAMGBuildCoarseOperatorKT( hypre_ParCSRMatrix  *RT,
                                      hypre_ParCSRMatrix  *A,
                                      hypre_ParCSRMatrix  *P,
                                      HYPRE_Int            keepTranspose,
                                      hypre_ParCSRMatrix **RAP_ptr )
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_RAP] -= hypre_MPI_Wtime();
#endif

   MPI_Comm comm = hypre_ParCSRMatrixComm(A);

   hypre_CSRMatrix *RT_diag = hypre_ParCSRMatrixDiag(RT);
   hypre_CSRMatrix *RT_offd = hypre_ParCSRMatrixOffd(RT);
   HYPRE_Int num_cols_diag_RT = hypre_CSRMatrixNumCols(RT_diag);
   HYPRE_Int num_cols_offd_RT = hypre_CSRMatrixNumCols(RT_offd);
   HYPRE_Int num_rows_offd_RT = hypre_CSRMatrixNumRows(RT_offd);
   hypre_ParCSRCommPkg *comm_pkg_RT = hypre_ParCSRMatrixCommPkg(RT);
   HYPRE_Int num_recvs_RT = 0;
   HYPRE_Int num_sends_RT = 0;
   HYPRE_Int *send_map_starts_RT;
   HYPRE_Int *send_map_elmts_RT;

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int  *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int  *A_diag_j = hypre_CSRMatrixJ(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int  *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int  *A_offd_j = hypre_CSRMatrixJ(A_offd);

   HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag);
   HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);

   hypre_CSRMatrix *P_diag = hypre_ParCSRMatrixDiag(P);
   HYPRE_Real *P_diag_data = hypre_CSRMatrixData(P_diag);
   HYPRE_Int  *P_diag_i = hypre_CSRMatrixI(P_diag);
   HYPRE_Int  *P_diag_j = hypre_CSRMatrixJ(P_diag);

   hypre_CSRMatrix *P_offd = hypre_ParCSRMatrixOffd(P);
   HYPRE_BigInt *col_map_offd_P = hypre_ParCSRMatrixColMapOffd(P);
   HYPRE_Real *P_offd_data = hypre_CSRMatrixData(P_offd);
   HYPRE_Int  *P_offd_i = hypre_CSRMatrixI(P_offd);
   HYPRE_Int  *P_offd_j = hypre_CSRMatrixJ(P_offd);

   HYPRE_BigInt first_col_diag_P = hypre_ParCSRMatrixFirstColDiag(P);
   HYPRE_BigInt last_col_diag_P;
   HYPRE_Int num_cols_diag_P = hypre_CSRMatrixNumCols(P_diag);
   HYPRE_Int num_cols_offd_P = hypre_CSRMatrixNumCols(P_offd);
   HYPRE_BigInt *coarse_partitioning = hypre_ParCSRMatrixColStarts(P);
   HYPRE_BigInt *RT_partitioning = hypre_ParCSRMatrixColStarts(RT);

   hypre_ParCSRMatrix *RAP;
   HYPRE_BigInt *col_map_offd_RAP = NULL;
   HYPRE_BigInt *new_col_map_offd_RAP = NULL;

   hypre_CSRMatrix *RAP_int = NULL;
   HYPRE_Real *RAP_int_data;
   HYPRE_Int  *RAP_int_i;
   HYPRE_BigInt *RAP_int_j;

   hypre_CSRMatrix *RAP_ext;
   HYPRE_Real *RAP_ext_data = NULL;
   HYPRE_Int  *RAP_ext_i = NULL;
   HYPRE_BigInt *RAP_ext_j = NULL;

   hypre_CSRMatrix *RAP_diag;
   HYPRE_Real *RAP_diag_data;
   HYPRE_Int  *RAP_diag_i;
   HYPRE_Int  *RAP_diag_j;

   hypre_CSRMatrix *RAP_offd;
   HYPRE_Real *RAP_offd_data = NULL;
   HYPRE_Int  *RAP_offd_i = NULL;
   HYPRE_Int  *RAP_offd_j = NULL;

   HYPRE_Int RAP_size;
   HYPRE_Int RAP_ext_size;
   HYPRE_Int RAP_diag_size;
   HYPRE_Int RAP_offd_size;
   HYPRE_Int P_ext_diag_size;
   HYPRE_Int P_ext_offd_size;
   HYPRE_BigInt first_col_diag_RAP;
   HYPRE_BigInt last_col_diag_RAP;
   HYPRE_Int num_cols_offd_RAP = 0;

   hypre_CSRMatrix *R_diag;
   HYPRE_Real *R_diag_data;
   HYPRE_Int  *R_diag_i;
   HYPRE_Int  *R_diag_j;

   hypre_CSRMatrix *R_offd;
   HYPRE_Real *R_offd_data;
   HYPRE_Int  *R_offd_i;
   HYPRE_Int  *R_offd_j;

   HYPRE_Real *RA_diag_data_array = NULL;
   HYPRE_Int  *RA_diag_j_array = NULL;
   HYPRE_Real *RA_offd_data_array = NULL;
   HYPRE_Int  *RA_offd_j_array = NULL;

   hypre_CSRMatrix *Ps_ext;
   HYPRE_Real *Ps_ext_data;
   HYPRE_Int  *Ps_ext_i;
   HYPRE_BigInt *Ps_ext_j;

   HYPRE_Real *P_ext_diag_data = NULL;
   HYPRE_Int  *P_ext_diag_i = NULL;
   HYPRE_Int  *P_ext_diag_j = NULL;
   HYPRE_Real *P_ext_offd_data = NULL;
   HYPRE_Int  *P_ext_offd_i = NULL;
   HYPRE_Int  *P_ext_offd_j = NULL;
   HYPRE_BigInt *P_big_offd_j = NULL;

   HYPRE_BigInt *col_map_offd_Pext;
   HYPRE_Int *map_P_to_Pext = NULL;
   HYPRE_Int *map_P_to_RAP = NULL;
   HYPRE_Int *map_Pext_to_RAP = NULL;

   HYPRE_Int *P_marker;
   HYPRE_Int **P_mark_array;
   HYPRE_Int **A_mark_array;
   HYPRE_Int *A_marker;

   HYPRE_BigInt *temp;
   HYPRE_BigInt n_coarse, n_coarse_RT;
   HYPRE_Int square = 1;
   HYPRE_Int num_cols_offd_Pext = 0;

   HYPRE_Int ic, i, j, k;
   HYPRE_Int i1, i2, i3, ii, ns, ne, size, rest;
   HYPRE_Int cnt = 0; /*value; */
   HYPRE_Int jj1, jj2, jj3, jcol;

   HYPRE_Int *jj_count, *jj_cnt_diag, *jj_cnt_offd;
   HYPRE_Int jj_counter, jj_count_diag, jj_count_offd;
   HYPRE_Int jj_row_begining, jj_row_begin_diag, jj_row_begin_offd;
   HYPRE_Int start_indexing = 0; /* start indexing for RAP_data at 0 */
   HYPRE_Int num_nz_cols_A;
   HYPRE_Int num_procs;
   HYPRE_Int num_threads;

   HYPRE_Real r_entry;
   HYPRE_Real r_a_product;
   HYPRE_Real r_a_p_product;

   HYPRE_Real zero = 0.0;

   HYPRE_Int *prefix_sum_workspace;

   /*-----------------------------------------------------------------------
    * Copy ParCSRMatrix RT into CSRMatrix R so that we have row-wise access
    * to restriction .
    *-----------------------------------------------------------------------*/

   hypre_MPI_Comm_size(comm,&num_procs);
   num_threads = hypre_NumThreads();

   if (comm_pkg_RT)
   {
      num_recvs_RT = hypre_ParCSRCommPkgNumRecvs(comm_pkg_RT);
      num_sends_RT = hypre_ParCSRCommPkgNumSends(comm_pkg_RT);
      send_map_starts_RT = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_RT);
      send_map_elmts_RT = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_RT);
   }
   else if (num_procs > 1)
   {
      hypre_MatvecCommPkgCreate(RT);
      comm_pkg_RT = hypre_ParCSRMatrixCommPkg(RT);
      num_recvs_RT = hypre_ParCSRCommPkgNumRecvs(comm_pkg_RT);
      num_sends_RT = hypre_ParCSRCommPkgNumSends(comm_pkg_RT);
      send_map_starts_RT = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_RT);
      send_map_elmts_RT = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_RT);
   }

   hypre_CSRMatrixTranspose(RT_diag,&R_diag,1);
   if (num_cols_offd_RT)
   {
      hypre_CSRMatrixTranspose(RT_offd,&R_offd,1);
      R_offd_data = hypre_CSRMatrixData(R_offd);
      R_offd_i = hypre_CSRMatrixI(R_offd);
      R_offd_j = hypre_CSRMatrixJ(R_offd);
   }
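   /*
    * Added illustration (not HYPRE code): transposing a CSR matrix turns
    * column-oriented access into row-oriented access. E.g. the 2x3 matrix
    *
    *    [ 10  0 20 ]   i = {0, 2, 3}, j = {0, 2, 1}, data = {10, 20, 30}
    *    [  0 30  0 ]
    *
    * transposes to the 3x2 CSR triple i = {0, 1, 2, 3}, j = {0, 1, 0},
    * data = {10, 30, 20}, so row r of R lists exactly the entries of
    * column r of RT -- which is why R = RT^T gives row-wise access to the
    * restriction operator.
    */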
   /*-----------------------------------------------------------------------
    * Access the CSR vectors for R. Also get sizes of fine and
    * coarse grids.
    *-----------------------------------------------------------------------*/

   R_diag_data = hypre_CSRMatrixData(R_diag);
   R_diag_i = hypre_CSRMatrixI(R_diag);
   R_diag_j = hypre_CSRMatrixJ(R_diag);

   n_coarse = hypre_ParCSRMatrixGlobalNumCols(P);
   num_nz_cols_A = num_cols_diag_A + num_cols_offd_A;

   n_coarse_RT = hypre_ParCSRMatrixGlobalNumCols(RT);
   if (n_coarse != n_coarse_RT)
      square = 0;

   /*-----------------------------------------------------------------------
    * Generate Ps_ext, i.e. portion of P that is stored on neighbor procs
    * and needed locally for triple matrix product
    *-----------------------------------------------------------------------*/

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   hypre_UnorderedIntMap send_map_elmts_RT_inverse_map;
   HYPRE_Int *send_map_elmts_starts_RT_aggregated = NULL;
   HYPRE_Int *send_map_elmts_RT_aggregated = NULL;

   /* build an inverse map of send_map_elmts_RT so that, given a local row,
      every send-buffer position referencing it can be found quickly */
   HYPRE_Int send_map_elmts_RT_inverse_map_initialized =
      num_sends_RT > 0 &&
      send_map_starts_RT[num_sends_RT] - send_map_starts_RT[0] > 0;
   if (send_map_elmts_RT_inverse_map_initialized)
   {
      hypre_UnorderedIntSet send_map_elmts_set;
      hypre_UnorderedIntSetCreate(&send_map_elmts_set,
         2*(send_map_starts_RT[num_sends_RT] - send_map_starts_RT[0]),
         16*hypre_NumThreads());

#pragma omp parallel for HYPRE_SMP_SCHEDULE
      for (i = send_map_starts_RT[0]; i < send_map_starts_RT[num_sends_RT]; i++)
      {
         HYPRE_Int key = send_map_elmts_RT[i];
         hypre_UnorderedIntSetPut(&send_map_elmts_set, key);
      }

      HYPRE_Int send_map_elmts_unique_size;
      HYPRE_Int *send_map_elmts_unique =
         hypre_UnorderedIntSetCopyToArray(&send_map_elmts_set, &send_map_elmts_unique_size);
      hypre_UnorderedIntSetDestroy(&send_map_elmts_set);

      hypre_UnorderedIntMapCreate(&send_map_elmts_RT_inverse_map,
         2*send_map_elmts_unique_size, 16*hypre_NumThreads());
#pragma omp parallel for HYPRE_SMP_SCHEDULE
      for (i = 0; i < send_map_elmts_unique_size; i++)
      {
         hypre_UnorderedIntMapPutIfAbsent(&send_map_elmts_RT_inverse_map,
                                          send_map_elmts_unique[i], i);
      }
      hypre_TFree(send_map_elmts_unique, HYPRE_MEMORY_HOST);

      send_map_elmts_starts_RT_aggregated =
         hypre_TAlloc(HYPRE_Int, send_map_elmts_unique_size + 1, HYPRE_MEMORY_HOST);
      send_map_elmts_RT_aggregated =
         hypre_TAlloc(HYPRE_Int, send_map_starts_RT[num_sends_RT], HYPRE_MEMORY_HOST);

#pragma omp parallel for HYPRE_SMP_SCHEDULE
      for (i = 0; i < send_map_elmts_unique_size; i++)
      {
         send_map_elmts_starts_RT_aggregated[i] = 0;
      }

      /* counting pass: histogram of how often each unique element occurs */
#pragma omp parallel for HYPRE_SMP_SCHEDULE
      for (i = send_map_starts_RT[0]; i < send_map_starts_RT[num_sends_RT]; i++)
      {
         HYPRE_Int idx = hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map,
                                                  send_map_elmts_RT[i]);
#pragma omp atomic
         send_map_elmts_starts_RT_aggregated[idx]++;
      }

      for (i = 0; i < send_map_elmts_unique_size - 1; i++)
      {
         send_map_elmts_starts_RT_aggregated[i + 1] += send_map_elmts_starts_RT_aggregated[i];
      }
      send_map_elmts_starts_RT_aggregated[send_map_elmts_unique_size] =
         send_map_starts_RT[num_sends_RT];

      /* scatter pass: group the send-buffer positions of each unique element */
#pragma omp parallel for HYPRE_SMP_SCHEDULE
      for (i = send_map_starts_RT[num_sends_RT] - 1; i >= send_map_starts_RT[0]; i--)
      {
         HYPRE_Int idx = hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map,
                                                  send_map_elmts_RT[i]);
         HYPRE_Int offset = hypre_fetch_and_add(send_map_elmts_starts_RT_aggregated + idx, -1) - 1;
         send_map_elmts_RT_aggregated[offset] = i;
      }
   }
#endif /* HYPRE_CONCURRENT_HOPSCOTCH */
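   /*
    * Worked example of the aggregation above (added comment, not HYPRE
    * code): with send_map_elmts_RT = {5, 2, 5, 9, 2, 5} the unique keys are
    * {2, 5, 9} and the inverse map sends 2->0, 5->1, 9->2. The counting
    * pass plus prefix sum yields starts = {0, 2, 5, 6}, and the reverse
    * scatter pass groups the send-buffer positions of each key:
    * aggregated = {1, 4,  0, 2, 5,  3}, i.e. key 2 occurs at positions
    * {1, 4}, key 5 at {0, 2, 5}, and key 9 at {3}.
    */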
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime();
   hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX_RAP] -= hypre_MPI_Wtime();
#endif

   if (num_procs > 1)
   {
      Ps_ext = hypre_ParCSRMatrixExtractBExt(P,A,1);
      Ps_ext_data = hypre_CSRMatrixData(Ps_ext);
      Ps_ext_i = hypre_CSRMatrixI(Ps_ext);
      Ps_ext_j = hypre_CSRMatrixBigJ(Ps_ext);
   }

   P_ext_diag_i = hypre_TAlloc(HYPRE_Int, num_cols_offd_A+1, HYPRE_MEMORY_HOST);
   P_ext_offd_i = hypre_TAlloc(HYPRE_Int, num_cols_offd_A+1, HYPRE_MEMORY_HOST);
   P_ext_diag_i[0] = 0;
   P_ext_offd_i[0] = 0;
   P_ext_diag_size = 0;
   P_ext_offd_size = 0;
   last_col_diag_P = first_col_diag_P + (HYPRE_BigInt) num_cols_diag_P - 1;

   /*HYPRE_Int prefix_sum_workspace[2*(num_threads + 1)];*/
   prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, 2*(num_threads + 1), HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j)
#endif
   /* This threading causes problem, maybe the prefix_sum in combination with BigInt? */
   {
      HYPRE_Int i_begin, i_end;
      hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_A);

      HYPRE_Int P_ext_diag_size_private = 0;
      HYPRE_Int P_ext_offd_size_private = 0;

      for (i = i_begin; i < i_end; i++)
      {
         for (j=Ps_ext_i[i]; j < Ps_ext_i[i+1]; j++)
            if (Ps_ext_j[j] < first_col_diag_P || Ps_ext_j[j] > last_col_diag_P)
               P_ext_offd_size_private++;
            else
               P_ext_diag_size_private++;
      }

      hypre_prefix_sum_pair(&P_ext_diag_size_private, &P_ext_diag_size,
                            &P_ext_offd_size_private, &P_ext_offd_size,
                            prefix_sum_workspace);

#ifdef HYPRE_USING_OPENMP
#pragma omp master
#endif
      {
         if (P_ext_diag_size)
         {
            P_ext_diag_j = hypre_CTAlloc(HYPRE_Int, P_ext_diag_size, HYPRE_MEMORY_HOST);
            P_ext_diag_data = hypre_CTAlloc(HYPRE_Real, P_ext_diag_size, HYPRE_MEMORY_HOST);
         }
         if (P_ext_offd_size)
         {
            P_ext_offd_j = hypre_CTAlloc(HYPRE_Int, P_ext_offd_size, HYPRE_MEMORY_HOST);
            P_big_offd_j = hypre_CTAlloc(HYPRE_BigInt, P_ext_offd_size, HYPRE_MEMORY_HOST);
            P_ext_offd_data = hypre_CTAlloc(HYPRE_Real, P_ext_offd_size, HYPRE_MEMORY_HOST);
            //temp = hypre_CTAlloc(HYPRE_BigInt, P_ext_offd_size+num_cols_offd_P, HYPRE_MEMORY_HOST);
         }
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      for (i = i_begin; i < i_end; i++)
      {
         for (j=Ps_ext_i[i]; j < Ps_ext_i[i+1]; j++)
         {
            HYPRE_BigInt value = Ps_ext_j[j];
            if (value < first_col_diag_P || value > last_col_diag_P)
            {
               //Ps_ext_j[P_ext_offd_size_private] = value;
               //temp[P_ext_offd_size_private] = value;
               P_big_offd_j[P_ext_offd_size_private] = value;
               P_ext_offd_data[P_ext_offd_size_private++] = Ps_ext_data[j];
            }
            else
            {
               P_ext_diag_j[P_ext_diag_size_private] = (HYPRE_Int)(Ps_ext_j[j] - first_col_diag_P);
               P_ext_diag_data[P_ext_diag_size_private++] = Ps_ext_data[j];
            }
         }
         P_ext_diag_i[i+1] = P_ext_diag_size_private;
         P_ext_offd_i[i+1] = P_ext_offd_size_private;
      }
   } /* omp parallel */

   hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST);

   if (num_procs > 1)
   {
      hypre_CSRMatrixDestroy(Ps_ext);
      Ps_ext = NULL;
   }

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   if (P_ext_offd_size || num_cols_offd_P)
   {
      hypre_UnorderedBigIntSet found_set;
      hypre_UnorderedBigIntSetCreate(&found_set, P_ext_offd_size + num_cols_offd_P,
                                     16*hypre_NumThreads());

#pragma omp parallel private(i)
      {
#pragma omp for HYPRE_SMP_SCHEDULE
         for (i = 0; i < P_ext_offd_size; i++)
         {
            //hypre_UnorderedBigIntSetPut(&found_set, Ps_ext_j[i]);
            hypre_UnorderedBigIntSetPut(&found_set, P_big_offd_j[i]);
         }

#pragma omp for HYPRE_SMP_SCHEDULE
         for (i = 0; i < num_cols_offd_P; i++)
         {
            hypre_UnorderedBigIntSetPut(&found_set, col_map_offd_P[i]);
         }
      } /* omp parallel */

      /* Warning on getting temp right !!!!! */
      temp = hypre_UnorderedBigIntSetCopyToArray(&found_set, &num_cols_offd_Pext);
      hypre_UnorderedBigIntSetDestroy(&found_set);

      hypre_UnorderedBigIntMap col_map_offd_Pext_inverse;
      hypre_big_sort_and_create_inverse_map(temp, num_cols_offd_Pext, &col_map_offd_Pext,
                                            &col_map_offd_Pext_inverse);

#pragma omp parallel for HYPRE_SMP_SCHEDULE
      for (i=0 ; i < P_ext_offd_size; i++)
         //Ps_ext_j[i] = hypre_UnorderedBigIntMapGet(&col_map_offd_Pext_inverse, Ps_ext_j[i]);
         P_ext_offd_j[i] = hypre_UnorderedBigIntMapGet(&col_map_offd_Pext_inverse, P_big_offd_j[i]);
      if (num_cols_offd_Pext)
         hypre_UnorderedBigIntMapDestroy(&col_map_offd_Pext_inverse);
   }
#else /* !HYPRE_CONCURRENT_HOPSCOTCH */
   if (P_ext_offd_size || num_cols_offd_P)
   {
      temp = hypre_CTAlloc(HYPRE_BigInt, P_ext_offd_size+num_cols_offd_P, HYPRE_MEMORY_HOST);
      for (i=0; i < P_ext_offd_size; i++)
         //Ps_ext_j[i] = temp[i];
         //temp[i] = Ps_ext_j[i];
         temp[i] = P_big_offd_j[i];
      cnt = P_ext_offd_size;
      for (i=0; i < num_cols_offd_P; i++)
         temp[cnt++] = col_map_offd_P[i];
   }
   if (cnt)
   {
      hypre_BigQsort0(temp, 0, cnt-1);
      num_cols_offd_Pext = 1;
      HYPRE_BigInt value = temp[0];
      for (i=1; i < cnt; i++)
      {
         if (temp[i] > value)
         {
            value = temp[i];
            temp[num_cols_offd_Pext++] = value;
         }
      }
   }

   if (num_cols_offd_Pext)
      col_map_offd_Pext = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_Pext, HYPRE_MEMORY_HOST);

   for (i=0; i < num_cols_offd_Pext; i++)
      col_map_offd_Pext[i] = temp[i];

   if (P_ext_offd_size || num_cols_offd_P)
      hypre_TFree(temp, HYPRE_MEMORY_HOST);

   /*if (P_ext_offd_size)
      P_ext_offd_j = hypre_CTAlloc(HYPRE_Int, P_ext_offd_size, HYPRE_MEMORY_HOST);*/
   for (i=0 ; i < P_ext_offd_size; i++)
      P_ext_offd_j[i] = hypre_BigBinarySearch(col_map_offd_Pext,
                                              //Ps_ext_j[i],
                                              P_big_offd_j[i],
                                              num_cols_offd_Pext);
#endif /* !HYPRE_CONCURRENT_HOPSCOTCH */

   if (P_ext_offd_size)
      hypre_TFree(P_big_offd_j, HYPRE_MEMORY_HOST);

   /*if (num_procs > 1)
   {
      hypre_CSRMatrixDestroy(Ps_ext);
      Ps_ext = NULL;
   }*/

   if (num_cols_offd_P)
   {
      map_P_to_Pext = hypre_CTAlloc(HYPRE_Int, num_cols_offd_P, HYPRE_MEMORY_HOST);

      cnt = 0;
      for (i=0; i < num_cols_offd_Pext; i++)
         if (col_map_offd_Pext[i] == col_map_offd_P[cnt])
         {
            map_P_to_Pext[cnt++] = i;
            if (cnt == num_cols_offd_P) break;
         }
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime();
   hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX_RAP] += hypre_MPI_Wtime();
#endif

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of RAP_int and set up RAP_int_i if there
    * are more than one processor and nonzero elements in R_offd
    *-----------------------------------------------------------------------*/

   P_mark_array = hypre_CTAlloc(HYPRE_Int *, num_threads, HYPRE_MEMORY_HOST);
   A_mark_array = hypre_CTAlloc(HYPRE_Int *, num_threads, HYPRE_MEMORY_HOST);

   if (num_cols_offd_RT)
   {
      jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_counter,jj_row_begining,A_marker,P_marker) HYPRE_SMP_SCHEDULE
#endif
      for (ii = 0; ii < num_threads; ii++)
      {
         size = num_cols_offd_RT/num_threads;
         rest = num_cols_offd_RT - size*num_threads;
         if (ii < rest)
         {
            ns = ii*size+ii;
            ne = (ii+1)*size+ii+1;
         }
         else
         {
            ns = ii*size+rest;
            ne = (ii+1)*size+rest;
         }

         /*-----------------------------------------------------------------------
          *  Allocate marker arrays.
          *-----------------------------------------------------------------------*/
*-----------------------------------------------------------------------*/ if (num_cols_offd_Pext || num_cols_diag_P) { P_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_cols_diag_P+num_cols_offd_Pext, HYPRE_MEMORY_HOST); P_marker = P_mark_array[ii]; } A_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_nz_cols_A, HYPRE_MEMORY_HOST); A_marker = A_mark_array[ii]; /*----------------------------------------------------------------------- * Initialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; for (ic = 0; ic < num_cols_diag_P+num_cols_offd_Pext; ic++) { P_marker[ic] = -1; } for (i = 0; i < num_nz_cols_A; i++) { A_marker[i] = -1; } /*----------------------------------------------------------------------- * Loop over exterior c-points *-----------------------------------------------------------------------*/ for (ic = ns; ic < ne; ic++) { jj_row_begining = jj_counter; /*-------------------------------------------------------------------- * Loop over entries in row ic of R_offd. *--------------------------------------------------------------------*/ for (jj1 = R_offd_i[ic]; jj1 < R_offd_i[ic+1]; jj1++) { i1 = R_offd_j[jj1]; /*----------------------------------------------------------------- * Loop over entries in row i1 of A_offd. *-----------------------------------------------------------------*/ for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++) { i2 = A_offd_j[jj2]; /*-------------------------------------------------------------- * Check A_marker to see if point i2 has been previously * visited. New entries in RAP only occur from unmarked points. *--------------------------------------------------------------*/ if (A_marker[i2] != ic) { /*----------------------------------------------------------- * Mark i2 as visited. *-----------------------------------------------------------*/ A_marker[i2] = ic; /*----------------------------------------------------------- * Loop over entries in row i2 of P_ext. *-----------------------------------------------------------*/ for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2+1]; jj3++) { i3 = P_ext_diag_j[jj3]; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (P_marker[i3] < jj_row_begining) { P_marker[i3] = jj_counter; jj_counter++; } } for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2+1]; jj3++) { i3 = P_ext_offd_j[jj3] + num_cols_diag_P; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (P_marker[i3] < jj_row_begining) { P_marker[i3] = jj_counter; jj_counter++; } } } } /*----------------------------------------------------------------- * Loop over entries in row i1 of A_diag. *-----------------------------------------------------------------*/ for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++) { i2 = A_diag_j[jj2]; /*-------------------------------------------------------------- * Check A_marker to see if point i2 has been previously * visited. New entries in RAP only occur from unmarked points. *--------------------------------------------------------------*/ if (A_marker[i2+num_cols_offd_A] != ic) { /*----------------------------------------------------------- * Mark i2 as visited. 
*-----------------------------------------------------------*/ A_marker[i2+num_cols_offd_A] = ic; /*----------------------------------------------------------- * Loop over entries in row i2 of P_diag. *-----------------------------------------------------------*/ for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2+1]; jj3++) { i3 = P_diag_j[jj3]; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (P_marker[i3] < jj_row_begining) { P_marker[i3] = jj_counter; jj_counter++; } } /*----------------------------------------------------------- * Loop over entries in row i2 of P_offd. *-----------------------------------------------------------*/ for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2+1]; jj3++) { i3 = map_P_to_Pext[P_offd_j[jj3]] + num_cols_diag_P; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (P_marker[i3] < jj_row_begining) { P_marker[i3] = jj_counter; jj_counter++; } } } } } } jj_count[ii] = jj_counter; } /*----------------------------------------------------------------------- * Allocate RAP_int_data and RAP_int_j arrays. *-----------------------------------------------------------------------*/ for (i = 0; i < num_threads-1; i++) jj_count[i+1] += jj_count[i]; RAP_size = jj_count[num_threads-1]; RAP_int_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_RT+1, HYPRE_MEMORY_HOST); RAP_int_data = hypre_CTAlloc(HYPRE_Real, RAP_size, HYPRE_MEMORY_HOST); RAP_int_j = hypre_CTAlloc(HYPRE_BigInt, RAP_size, HYPRE_MEMORY_HOST); RAP_int_i[num_cols_offd_RT] = RAP_size; /*----------------------------------------------------------------------- * Second Pass: Fill in RAP_int_data and RAP_int_j. *-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_counter,jj_row_begining,A_marker,P_marker,r_entry,r_a_product,r_a_p_product) HYPRE_SMP_SCHEDULE #endif for (ii = 0; ii < num_threads; ii++) { size = num_cols_offd_RT/num_threads; rest = num_cols_offd_RT - size*num_threads; if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } /*----------------------------------------------------------------------- * Initialize some stuff. *-----------------------------------------------------------------------*/ if (num_cols_offd_Pext || num_cols_diag_P) P_marker = P_mark_array[ii]; A_marker = A_mark_array[ii]; jj_counter = start_indexing; if (ii > 0) jj_counter = jj_count[ii-1]; for (ic = 0; ic < num_cols_diag_P+num_cols_offd_Pext; ic++) { P_marker[ic] = -1; } for (i = 0; i < num_nz_cols_A; i++) { A_marker[i] = -1; } /*----------------------------------------------------------------------- * Loop over exterior c-points. *-----------------------------------------------------------------------*/ for (ic = ns; ic < ne; ic++) { jj_row_begining = jj_counter; RAP_int_i[ic] = jj_counter; /*-------------------------------------------------------------------- * Loop over entries in row ic of R_offd. 
*--------------------------------------------------------------------*/ for (jj1 = R_offd_i[ic]; jj1 < R_offd_i[ic+1]; jj1++) { i1 = R_offd_j[jj1]; r_entry = R_offd_data[jj1]; /*----------------------------------------------------------------- * Loop over entries in row i1 of A_offd. *-----------------------------------------------------------------*/ for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++) { i2 = A_offd_j[jj2]; r_a_product = r_entry * A_offd_data[jj2]; /*-------------------------------------------------------------- * Check A_marker to see if point i2 has been previously * visited. New entries in RAP only occur from unmarked points. *--------------------------------------------------------------*/ if (A_marker[i2] != ic) { /*----------------------------------------------------------- * Mark i2 as visited. *-----------------------------------------------------------*/ A_marker[i2] = ic; /*----------------------------------------------------------- * Loop over entries in row i2 of P_ext. *-----------------------------------------------------------*/ for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2+1]; jj3++) { i3 = P_ext_diag_j[jj3]; r_a_p_product = r_a_product * P_ext_diag_data[jj3]; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i3} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. *--------------------------------------------------------*/ if (P_marker[i3] < jj_row_begining) { P_marker[i3] = jj_counter; RAP_int_data[jj_counter] = r_a_p_product; RAP_int_j[jj_counter] = (HYPRE_BigInt)i3 + first_col_diag_P; jj_counter++; } else { RAP_int_data[P_marker[i3]] += r_a_p_product; } } for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2+1]; jj3++) { i3 = P_ext_offd_j[jj3] + num_cols_diag_P; r_a_p_product = r_a_product * P_ext_offd_data[jj3]; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i3} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. *--------------------------------------------------------*/ if (P_marker[i3] < jj_row_begining) { P_marker[i3] = jj_counter; RAP_int_data[jj_counter] = r_a_p_product; RAP_int_j[jj_counter] = col_map_offd_Pext[i3-num_cols_diag_P]; jj_counter++; } else { RAP_int_data[P_marker[i3]] += r_a_p_product; } } } /*-------------------------------------------------------------- * If i2 was previously visited ( A_marker[i2] == ic ), it yields * no new entries in RAP, and we can just add the new contributions. *--------------------------------------------------------------*/ else { for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2+1]; jj3++) { i3 = P_ext_diag_j[jj3]; r_a_p_product = r_a_product * P_ext_diag_data[jj3]; RAP_int_data[P_marker[i3]] += r_a_p_product; } for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2+1]; jj3++) { i3 = P_ext_offd_j[jj3] + num_cols_diag_P; r_a_p_product = r_a_product * P_ext_offd_data[jj3]; RAP_int_data[P_marker[i3]] += r_a_p_product; } } } /*----------------------------------------------------------------- * Loop over entries in row i1 of A_diag. *-----------------------------------------------------------------*/ for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++) { i2 = A_diag_j[jj2]; r_a_product = r_entry * A_diag_data[jj2]; /*-------------------------------------------------------------- * Check A_marker to see if point i2 has been previously * visited. New entries in RAP only occur from unmarked points.
*--------------------------------------------------------------*/ if (A_marker[i2+num_cols_offd_A] != ic) { /*----------------------------------------------------------- * Mark i2 as visited. *-----------------------------------------------------------*/ A_marker[i2+num_cols_offd_A] = ic; /*----------------------------------------------------------- * Loop over entries in row i2 of P_diag. *-----------------------------------------------------------*/ for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2+1]; jj3++) { i3 = P_diag_j[jj3]; r_a_p_product = r_a_product * P_diag_data[jj3]; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i3} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. *--------------------------------------------------------*/ if (P_marker[i3] < jj_row_begining) { P_marker[i3] = jj_counter; RAP_int_data[jj_counter] = r_a_p_product; RAP_int_j[jj_counter] = (HYPRE_BigInt)i3 + first_col_diag_P; jj_counter++; } else { RAP_int_data[P_marker[i3]] += r_a_p_product; } } for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2+1]; jj3++) { i3 = map_P_to_Pext[P_offd_j[jj3]] + num_cols_diag_P; r_a_p_product = r_a_product * P_offd_data[jj3]; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i3} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. *--------------------------------------------------------*/ if (P_marker[i3] < jj_row_begining) { P_marker[i3] = jj_counter; RAP_int_data[jj_counter] = r_a_p_product; RAP_int_j[jj_counter] = col_map_offd_Pext[i3-num_cols_diag_P]; jj_counter++; } else { RAP_int_data[P_marker[i3]] += r_a_p_product; } } } /*-------------------------------------------------------------- * If i2 was previously visited ( A_marker[i2+num_cols_offd_A] == ic ), * it yields no new entries in RAP, and we can just add the new * contributions.
*--------------------------------------------------------------*/ else { for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2+1]; jj3++) { i3 = P_diag_j[jj3]; r_a_p_product = r_a_product * P_diag_data[jj3]; RAP_int_data[P_marker[i3]] += r_a_p_product; } for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2+1]; jj3++) { i3 = map_P_to_Pext[P_offd_j[jj3]] + num_cols_diag_P; r_a_p_product = r_a_product * P_offd_data[jj3]; RAP_int_data[P_marker[i3]] += r_a_p_product; } } } } } if (num_cols_offd_Pext || num_cols_diag_P) hypre_TFree(P_mark_array[ii], HYPRE_MEMORY_HOST); hypre_TFree(A_mark_array[ii], HYPRE_MEMORY_HOST); } RAP_int = hypre_CSRMatrixCreate(num_cols_offd_RT,num_rows_offd_RT,RAP_size); hypre_CSRMatrixMemoryLocation(RAP_int) = HYPRE_MEMORY_HOST; hypre_CSRMatrixI(RAP_int) = RAP_int_i; hypre_CSRMatrixBigJ(RAP_int) = RAP_int_j; hypre_CSRMatrixData(RAP_int) = RAP_int_data; hypre_TFree(jj_count, HYPRE_MEMORY_HOST); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime(); hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX_RAP] -= hypre_MPI_Wtime(); #endif RAP_ext_size = 0; if (num_sends_RT || num_recvs_RT) { void *request; hypre_ExchangeExternalRowsInit(RAP_int, comm_pkg_RT, &request); RAP_ext = hypre_ExchangeExternalRowsWait(request); RAP_ext_i = hypre_CSRMatrixI(RAP_ext); RAP_ext_j = hypre_CSRMatrixBigJ(RAP_ext); RAP_ext_data = hypre_CSRMatrixData(RAP_ext); RAP_ext_size = RAP_ext_i[hypre_CSRMatrixNumRows(RAP_ext)]; } if (num_cols_offd_RT) { hypre_CSRMatrixDestroy(RAP_int); RAP_int = NULL; } RAP_diag_i = hypre_TAlloc(HYPRE_Int, num_cols_diag_RT+1, HYPRE_MEMORY_DEVICE); RAP_offd_i = hypre_TAlloc(HYPRE_Int, num_cols_diag_RT+1, HYPRE_MEMORY_DEVICE); first_col_diag_RAP = first_col_diag_P; last_col_diag_RAP = first_col_diag_P + num_cols_diag_P - 1; /*----------------------------------------------------------------------- * check for new nonzero columns in RAP_offd generated through RAP_ext *-----------------------------------------------------------------------*/ #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_UnorderedBigIntMap col_map_offd_RAP_inverse; if (RAP_ext_size || num_cols_offd_Pext) { hypre_UnorderedBigIntSet found_set; hypre_UnorderedBigIntSetCreate(&found_set, 2*(RAP_ext_size + num_cols_offd_Pext), 16*hypre_NumThreads()); cnt = 0; #pragma omp parallel private(i) { #pragma omp for HYPRE_SMP_SCHEDULE for (i = 0; i < RAP_ext_size; i++) { if (RAP_ext_j[i] < first_col_diag_RAP || RAP_ext_j[i] > last_col_diag_RAP) hypre_UnorderedBigIntSetPut(&found_set, RAP_ext_j[i]); } #pragma omp for HYPRE_SMP_SCHEDULE for (i = 0; i < num_cols_offd_Pext; i++) { hypre_UnorderedBigIntSetPut(&found_set, col_map_offd_Pext[i]); } } /* omp parallel */ temp = hypre_UnorderedBigIntSetCopyToArray(&found_set, &num_cols_offd_RAP); hypre_UnorderedBigIntSetDestroy(&found_set); hypre_big_sort_and_create_inverse_map(temp, num_cols_offd_RAP, &col_map_offd_RAP, &col_map_offd_RAP_inverse); } #else /* !HYPRE_CONCURRENT_HOPSCOTCH */ if (RAP_ext_size || num_cols_offd_Pext) { temp = hypre_CTAlloc(HYPRE_BigInt, RAP_ext_size+num_cols_offd_Pext, HYPRE_MEMORY_HOST); cnt = 0; for (i=0; i < RAP_ext_size; i++) if (RAP_ext_j[i] < first_col_diag_RAP || RAP_ext_j[i] > last_col_diag_RAP) temp[cnt++] = RAP_ext_j[i]; for (i=0; i < num_cols_offd_Pext; i++) temp[cnt++] = col_map_offd_Pext[i]; if (cnt) { hypre_BigQsort0(temp,0,cnt-1); HYPRE_BigInt value = temp[0]; num_cols_offd_RAP = 1; for (i=1; i < cnt; i++) { if (temp[i] > value) { value = temp[i]; temp[num_cols_offd_RAP++] = value; } } } /* now evaluate col_map_offd_RAP */ if 
(num_cols_offd_RAP) col_map_offd_RAP = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_RAP, HYPRE_MEMORY_HOST); for (i=0 ; i < num_cols_offd_RAP; i++) col_map_offd_RAP[i] = temp[i]; hypre_TFree(temp, HYPRE_MEMORY_HOST); } #endif /* !HYPRE_CONCURRENT_HOPSCOTCH */ if (num_cols_offd_P) { map_P_to_RAP = hypre_TAlloc(HYPRE_Int, num_cols_offd_P, HYPRE_MEMORY_HOST); cnt = 0; for (i=0; i < num_cols_offd_RAP; i++) if (col_map_offd_RAP[i] == col_map_offd_P[cnt]) { map_P_to_RAP[cnt++] = i; if (cnt == num_cols_offd_P) break; } } if (num_cols_offd_Pext) { map_Pext_to_RAP = hypre_TAlloc(HYPRE_Int, num_cols_offd_Pext, HYPRE_MEMORY_HOST); cnt = 0; for (i=0; i < num_cols_offd_RAP; i++) if (col_map_offd_RAP[i] == col_map_offd_Pext[cnt]) { map_Pext_to_RAP[cnt++] = i; if (cnt == num_cols_offd_Pext) break; } } /*----------------------------------------------------------------------- * Convert RAP_ext column indices *-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (i=0; i < RAP_ext_size; i++) if (RAP_ext_j[i] < first_col_diag_RAP || RAP_ext_j[i] > last_col_diag_RAP) RAP_ext_j[i] = (HYPRE_BigInt)num_cols_diag_P #ifdef HYPRE_CONCURRENT_HOPSCOTCH +(HYPRE_BigInt)hypre_UnorderedBigIntMapGet(&col_map_offd_RAP_inverse, RAP_ext_j[i]); #else +(HYPRE_BigInt)hypre_BigBinarySearch(col_map_offd_RAP, RAP_ext_j[i],num_cols_offd_RAP); #endif else RAP_ext_j[i] -= first_col_diag_RAP; #ifdef HYPRE_CONCURRENT_HOPSCOTCH if (num_cols_offd_RAP) hypre_UnorderedBigIntMapDestroy(&col_map_offd_RAP_inverse); #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime(); hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX_RAP] += hypre_MPI_Wtime(); #endif /* need to allocate new P_marker etc. and make further changes */ /*----------------------------------------------------------------------- * Initialize some stuff. *-----------------------------------------------------------------------*/ jj_cnt_diag = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_cnt_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,k,jcol,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_count_diag,jj_count_offd,jj_row_begin_diag,jj_row_begin_offd,A_marker,P_marker) HYPRE_SMP_SCHEDULE #endif for (ii = 0; ii < num_threads; ii++) { size = num_cols_diag_RT/num_threads; rest = num_cols_diag_RT - size*num_threads; if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } P_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_cols_diag_P+num_cols_offd_RAP, HYPRE_MEMORY_HOST); A_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_nz_cols_A, HYPRE_MEMORY_HOST); P_marker = P_mark_array[ii]; A_marker = A_mark_array[ii]; jj_count_diag = start_indexing; jj_count_offd = start_indexing; for (ic = 0; ic < num_cols_diag_P+num_cols_offd_RAP; ic++) { P_marker[ic] = -1; } for (i = 0; i < num_nz_cols_A; i++) { A_marker[i] = -1; } /*----------------------------------------------------------------------- * Loop over interior c-points. *-----------------------------------------------------------------------*/ for (ic = ns; ic < ne; ic++) { /*-------------------------------------------------------------------- * Set marker for diagonal entry, RAP_{ic,ic}. 
and for all points * being added to row ic of RAP_diag and RAP_offd through RAP_ext *--------------------------------------------------------------------*/ jj_row_begin_diag = jj_count_diag; jj_row_begin_offd = jj_count_offd; if (square) P_marker[ic] = jj_count_diag++; #ifdef HYPRE_CONCURRENT_HOPSCOTCH if (send_map_elmts_RT_inverse_map_initialized) { HYPRE_Int i = hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map, ic); if (i != -1) { for (j = send_map_elmts_starts_RT_aggregated[i]; j < send_map_elmts_starts_RT_aggregated[i + 1]; j++) { HYPRE_Int jj = send_map_elmts_RT_aggregated[j]; for (k=RAP_ext_i[jj]; k < RAP_ext_i[jj+1]; k++) { jcol = (HYPRE_Int)RAP_ext_j[k]; if (jcol < num_cols_diag_P) { if (P_marker[jcol] < jj_row_begin_diag) { P_marker[jcol] = jj_count_diag; jj_count_diag++; } } else { if (P_marker[jcol] < jj_row_begin_offd) { P_marker[jcol] = jj_count_offd; jj_count_offd++; } } } } } // if (set) } #else /* !HYPRE_CONCURRENT_HOPSCOTCH */ for (i=0; i < num_sends_RT; i++) for (j = send_map_starts_RT[i]; j < send_map_starts_RT[i+1]; j++) if (send_map_elmts_RT[j] == ic) { for (k=RAP_ext_i[j]; k < RAP_ext_i[j+1]; k++) { jcol = (HYPRE_Int) RAP_ext_j[k]; if (jcol < num_cols_diag_P) { if (P_marker[jcol] < jj_row_begin_diag) { P_marker[jcol] = jj_count_diag; jj_count_diag++; } } else { if (P_marker[jcol] < jj_row_begin_offd) { P_marker[jcol] = jj_count_offd; jj_count_offd++; } } } break; } #endif /* !HYPRE_CONCURRENT_HOPSCOTCH */ /*-------------------------------------------------------------------- * Loop over entries in row ic of R_diag. *--------------------------------------------------------------------*/ for (jj1 = R_diag_i[ic]; jj1 < R_diag_i[ic+1]; jj1++) { i1 = R_diag_j[jj1]; /*----------------------------------------------------------------- * Loop over entries in row i1 of A_offd. *-----------------------------------------------------------------*/ if (num_cols_offd_A) { for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++) { i2 = A_offd_j[jj2]; /*-------------------------------------------------------------- * Check A_marker to see if point i2 has been previously * visited. New entries in RAP only occur from unmarked points. *--------------------------------------------------------------*/ if (A_marker[i2] != ic) { /*----------------------------------------------------------- * Mark i2 as visited. *-----------------------------------------------------------*/ A_marker[i2] = ic; /*----------------------------------------------------------- * Loop over entries in row i2 of P_ext. *-----------------------------------------------------------*/ for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2+1]; jj3++) { i3 = P_ext_diag_j[jj3]; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (P_marker[i3] < jj_row_begin_diag) { P_marker[i3] = jj_count_diag; jj_count_diag++; } } for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2+1]; jj3++) { i3 = map_Pext_to_RAP[P_ext_offd_j[jj3]]+num_cols_diag_P; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i3} has not already * been accounted for. If it has not, mark it and increment * counter. 
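 *
 * The HYPRE_CONCURRENT_HOPSCOTCH branch above avoids the #else branch's
 * rescan of every send entry for every row by keeping a precomputed
 * inverse of send_map_elmts_RT (local row -> its send-entry positions).
 * That inversion is an ordinary counting sort; a sketch with hypothetical
 * count/starts/fill/entries arrays:
 *
 *    HYPRE_Int n = send_map_starts_RT[num_sends_RT];
 *    for (j = 0; j < n; j++) count[send_map_elmts_RT[j]]++;
 *    starts[0] = 0;
 *    for (i = 0; i < num_rows; i++) starts[i+1] = starts[i] + count[i];
 *    for (i = 0; i < num_rows; i++) fill[i] = starts[i];
 *    for (j = 0; j < n; j++) entries[fill[send_map_elmts_RT[j]]++] = j;
 *
 * after which entries[starts[ic] .. starts[ic+1]) lists every position of
 * row ic in send_map_elmts_RT and the per-row lookup becomes O(1).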
*--------------------------------------------------------*/ if (P_marker[i3] < jj_row_begin_offd) { P_marker[i3] = jj_count_offd; jj_count_offd++; } } } } } /*----------------------------------------------------------------- * Loop over entries in row i1 of A_diag. *-----------------------------------------------------------------*/ for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++) { i2 = A_diag_j[jj2]; /*-------------------------------------------------------------- * Check A_marker to see if point i2 has been previously * visited. New entries in RAP only occur from unmarked points. *--------------------------------------------------------------*/ if (A_marker[i2+num_cols_offd_A] != ic) { /*----------------------------------------------------------- * Mark i2 as visited. *-----------------------------------------------------------*/ A_marker[i2+num_cols_offd_A] = ic; /*----------------------------------------------------------- * Loop over entries in row i2 of P_diag. *-----------------------------------------------------------*/ for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2+1]; jj3++) { i3 = P_diag_j[jj3]; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (P_marker[i3] < jj_row_begin_diag) { P_marker[i3] = jj_count_diag; jj_count_diag++; } } /*----------------------------------------------------------- * Loop over entries in row i2 of P_offd. *-----------------------------------------------------------*/ if (num_cols_offd_P) { for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2+1]; jj3++) { i3 = map_P_to_RAP[P_offd_j[jj3]] + num_cols_diag_P; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (P_marker[i3] < jj_row_begin_offd) { P_marker[i3] = jj_count_offd; jj_count_offd++; } } } } } } /*-------------------------------------------------------------------- * Set RAP_diag_i and RAP_offd_i for this row. *--------------------------------------------------------------------*/ /* RAP_diag_i[ic] = jj_row_begin_diag; RAP_offd_i[ic] = jj_row_begin_offd; */ } jj_cnt_diag[ii] = jj_count_diag; jj_cnt_offd[ii] = jj_count_offd; } for (i=0; i < num_threads-1; i++) { jj_cnt_diag[i+1] += jj_cnt_diag[i]; jj_cnt_offd[i+1] += jj_cnt_offd[i]; } jj_count_diag = jj_cnt_diag[num_threads-1]; jj_count_offd = jj_cnt_offd[num_threads-1]; RAP_diag_i[num_cols_diag_RT] = jj_count_diag; RAP_offd_i[num_cols_diag_RT] = jj_count_offd; /*----------------------------------------------------------------------- * Allocate RAP_diag_data and RAP_diag_j arrays. * Allocate RAP_offd_data and RAP_offd_j arrays. 
*-----------------------------------------------------------------------*/ RAP_diag_size = jj_count_diag; if (RAP_diag_size) { RAP_diag_data = hypre_CTAlloc(HYPRE_Real, RAP_diag_size, HYPRE_MEMORY_DEVICE); RAP_diag_j = hypre_CTAlloc(HYPRE_Int, RAP_diag_size, HYPRE_MEMORY_DEVICE); } RAP_offd_size = jj_count_offd; if (RAP_offd_size) { RAP_offd_data = hypre_CTAlloc(HYPRE_Real, RAP_offd_size, HYPRE_MEMORY_DEVICE); RAP_offd_j = hypre_CTAlloc(HYPRE_Int, RAP_offd_size, HYPRE_MEMORY_DEVICE); } if (RAP_offd_size == 0 && num_cols_offd_RAP != 0) { num_cols_offd_RAP = 0; hypre_TFree(col_map_offd_RAP, HYPRE_MEMORY_HOST); } RA_diag_data_array = hypre_TAlloc(HYPRE_Real, num_cols_diag_A*num_threads, HYPRE_MEMORY_HOST); RA_diag_j_array = hypre_TAlloc(HYPRE_Int, num_cols_diag_A*num_threads, HYPRE_MEMORY_HOST); if (num_cols_offd_A) { RA_offd_data_array = hypre_TAlloc(HYPRE_Real, num_cols_offd_A*num_threads, HYPRE_MEMORY_HOST); RA_offd_j_array = hypre_TAlloc(HYPRE_Int, num_cols_offd_A*num_threads, HYPRE_MEMORY_HOST); } /*----------------------------------------------------------------------- * Second Pass: Fill in RAP_diag_data and RAP_diag_j. * Second Pass: Fill in RAP_offd_data and RAP_offd_j. *-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,k,jcol,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_count_diag,jj_count_offd,jj_row_begin_diag,jj_row_begin_offd,A_marker,P_marker,r_entry,r_a_product,r_a_p_product) HYPRE_SMP_SCHEDULE #endif for (ii = 0; ii < num_threads; ii++) { size = num_cols_diag_RT/num_threads; rest = num_cols_diag_RT - size*num_threads; if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } /*----------------------------------------------------------------------- * Initialize some stuff. *-----------------------------------------------------------------------*/ P_marker = P_mark_array[ii]; A_marker = A_mark_array[ii]; for (ic = 0; ic < num_cols_diag_P+num_cols_offd_RAP; ic++) { P_marker[ic] = -1; } for (i = 0; i < num_nz_cols_A; i++) { A_marker[i] = -1; } jj_count_diag = start_indexing; jj_count_offd = start_indexing; if (ii > 0) { jj_count_diag = jj_cnt_diag[ii-1]; jj_count_offd = jj_cnt_offd[ii-1]; } // temporary matrix RA = R*A // only need to store one row per thread because R*A and (R*A)*P are fused // into one loop. hypre_CSRMatrix RA_diag, RA_offd; RA_diag.data = RA_diag_data_array + num_cols_diag_A*ii; RA_diag.j = RA_diag_j_array + num_cols_diag_A*ii; RA_diag.num_nonzeros = 0; RA_offd.num_nonzeros = 0; if (num_cols_offd_A) { RA_offd.data = RA_offd_data_array + num_cols_offd_A*ii; RA_offd.j = RA_offd_j_array + num_cols_offd_A*ii; } /*----------------------------------------------------------------------- * Loop over interior c-points.
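 *
 * One scratch row of RA per thread suffices because the triple product is
 * formed row by row: row ic of RA = R*A is accumulated sparsely, then
 * immediately multiplied by P and scattered into RAP before the next ic
 * reuses the scratch. A condensed sketch of the fusion with hypothetical
 * names (the loop below avoids the explicit reset by comparing markers
 * against the ra_row_begin_* offsets instead):
 *
 *    HYPRE_Int ra_nnz = 0;                        // scratch row of R*A
 *    for (jj1 = R_i[ic]; jj1 < R_i[ic+1]; jj1++) {
 *       HYPRE_Int k = R_j[jj1]; HYPRE_Real r = R_a[jj1];
 *       for (jj2 = A_i[k]; jj2 < A_i[k+1]; jj2++) {
 *          HYPRE_Int col = A_j[jj2];
 *          if (mark[col] < 0) { mark[col] = ra_nnz; RA_j[ra_nnz] = col;
 *                               RA_a[ra_nnz++] = r * A_a[jj2]; }
 *          else { RA_a[mark[col]] += r * A_a[jj2]; }
 *       }
 *    }
 *    // ... scatter (row of RA) * P into RAP as in the first pass, then
 *    // reset: for (k = 0; k < ra_nnz; k++) mark[RA_j[k]] = -1;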
*-----------------------------------------------------------------------*/ for (ic = ns; ic < ne; ic++) { /*-------------------------------------------------------------------- * Create diagonal entry, RAP_{ic,ic} and add entries of RAP_ext *--------------------------------------------------------------------*/ jj_row_begin_diag = jj_count_diag; jj_row_begin_offd = jj_count_offd; RAP_diag_i[ic] = jj_row_begin_diag; RAP_offd_i[ic] = jj_row_begin_offd; HYPRE_Int ra_row_begin_diag = RA_diag.num_nonzeros; HYPRE_Int ra_row_begin_offd = RA_offd.num_nonzeros; if (square) { P_marker[ic] = jj_count_diag; RAP_diag_data[jj_count_diag] = zero; RAP_diag_j[jj_count_diag] = ic; jj_count_diag++; } #ifdef HYPRE_CONCURRENT_HOPSCOTCH if (send_map_elmts_RT_inverse_map_initialized) { HYPRE_Int i = hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map, ic); if (i != -1) { for (j = send_map_elmts_starts_RT_aggregated[i]; j < send_map_elmts_starts_RT_aggregated[i + 1]; j++) { HYPRE_Int jj = send_map_elmts_RT_aggregated[j]; for (k=RAP_ext_i[jj]; k < RAP_ext_i[jj+1]; k++) { jcol = (HYPRE_Int)RAP_ext_j[k]; if (jcol < num_cols_diag_P) { if (P_marker[jcol] < jj_row_begin_diag) { P_marker[jcol] = jj_count_diag; RAP_diag_data[jj_count_diag] = RAP_ext_data[k]; RAP_diag_j[jj_count_diag] = jcol; jj_count_diag++; } else RAP_diag_data[P_marker[jcol]] += RAP_ext_data[k]; } else { if (P_marker[jcol] < jj_row_begin_offd) { P_marker[jcol] = jj_count_offd; RAP_offd_data[jj_count_offd] = RAP_ext_data[k]; RAP_offd_j[jj_count_offd] = jcol-num_cols_diag_P; jj_count_offd++; } else RAP_offd_data[P_marker[jcol]] += RAP_ext_data[k]; } } } } // if (set) } #else /* !HYPRE_CONCURRENT_HOPSCOTCH */ for (i=0; i < num_sends_RT; i++) for (j = send_map_starts_RT[i]; j < send_map_starts_RT[i+1]; j++) if (send_map_elmts_RT[j] == ic) { for (k=RAP_ext_i[j]; k < RAP_ext_i[j+1]; k++) { jcol = (HYPRE_Int)RAP_ext_j[k]; if (jcol < num_cols_diag_P) { if (P_marker[jcol] < jj_row_begin_diag) { P_marker[jcol] = jj_count_diag; RAP_diag_data[jj_count_diag] = RAP_ext_data[k]; RAP_diag_j[jj_count_diag] = jcol; jj_count_diag++; } else RAP_diag_data[P_marker[jcol]] += RAP_ext_data[k]; } else { if (P_marker[jcol] < jj_row_begin_offd) { P_marker[jcol] = jj_count_offd; RAP_offd_data[jj_count_offd] = RAP_ext_data[k]; RAP_offd_j[jj_count_offd] = jcol-num_cols_diag_P; jj_count_offd++; } else RAP_offd_data[P_marker[jcol]] += RAP_ext_data[k]; } } break; } #endif /* !HYPRE_CONCURRENT_HOPSCOTCH */ /*-------------------------------------------------------------------- * Loop over entries in row ic of R_diag and compute row ic of RA. *--------------------------------------------------------------------*/ for (jj1 = R_diag_i[ic]; jj1 < R_diag_i[ic+1]; jj1++) { i1 = R_diag_j[jj1]; r_entry = R_diag_data[jj1]; /*----------------------------------------------------------------- * Loop over entries in row i1 of A_offd. *-----------------------------------------------------------------*/ if (num_cols_offd_A) { for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++) { i2 = A_offd_j[jj2]; HYPRE_Real a_entry = A_offd_data[jj2]; HYPRE_Int marker = A_marker[i2]; /*-------------------------------------------------------------- * Check A_marker to see if point i2 has been previously * visited. New entries in RAP only occur from unmarked points. *--------------------------------------------------------------*/ if (marker < ra_row_begin_offd) { /*----------------------------------------------------------- * Mark i2 as visited. 
*-----------------------------------------------------------*/ A_marker[i2] = RA_offd.num_nonzeros; RA_offd.data[RA_offd.num_nonzeros - ra_row_begin_offd] = r_entry * a_entry; RA_offd.j[RA_offd.num_nonzeros - ra_row_begin_offd] = i2; RA_offd.num_nonzeros++; } /*-------------------------------------------------------------- * If i2 was previously visited ( A_marker[i2] >= ra_row_begin_offd ), * it yields no new entries in RA, and we can just add the new * contributions. *--------------------------------------------------------------*/ else { RA_offd.data[marker - ra_row_begin_offd] += r_entry * a_entry; // JSP: the compiler is more likely to generate FMA instructions // when we don't manually eliminate the common subexpression // r_entry * A_offd_data[jj2]. } } // loop over entries in row i1 of A_offd } // num_cols_offd_A /*----------------------------------------------------------------- * Loop over entries in row i1 of A_diag. *-----------------------------------------------------------------*/ for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++) { i2 = A_diag_j[jj2]; HYPRE_Real a_entry = A_diag_data[jj2]; HYPRE_Int marker = A_marker[i2+num_cols_offd_A]; /*-------------------------------------------------------------- * Check A_marker to see if point i2 has been previously * visited. New entries in RAP only occur from unmarked points. *--------------------------------------------------------------*/ if (marker < ra_row_begin_diag) { /*----------------------------------------------------------- * Mark i2 as visited. *-----------------------------------------------------------*/ A_marker[i2+num_cols_offd_A] = RA_diag.num_nonzeros; RA_diag.data[RA_diag.num_nonzeros - ra_row_begin_diag] = r_entry * a_entry; RA_diag.j[RA_diag.num_nonzeros - ra_row_begin_diag] = i2; RA_diag.num_nonzeros++; } /*-------------------------------------------------------------- * If i2 was previously visited * ( A_marker[i2+num_cols_offd_A] >= ra_row_begin_diag ), it yields * no new entries in RA, and we can just add the new contributions. *--------------------------------------------------------------*/ else { RA_diag.data[marker - ra_row_begin_diag] += r_entry * a_entry; } } // loop over entries in row i1 of A_diag } // loop over entries in row ic of R_diag /*-------------------------------------------------------------------- * Loop over entries in row ic of RA_offd. *--------------------------------------------------------------------*/ for (jj1 = ra_row_begin_offd; jj1 < RA_offd.num_nonzeros; jj1++) { i1 = RA_offd.j[jj1 - ra_row_begin_offd]; r_a_product = RA_offd.data[jj1 - ra_row_begin_offd]; /*----------------------------------------------------------- * Loop over entries in row i1 of P_ext. *-----------------------------------------------------------*/ for (jj2 = P_ext_diag_i[i1]; jj2 < P_ext_diag_i[i1+1]; jj2++) { i2 = P_ext_diag_j[jj2]; HYPRE_Real p_entry = P_ext_diag_data[jj2]; HYPRE_Int marker = P_marker[i2]; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i2} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution.
*--------------------------------------------------------*/ if (marker < jj_row_begin_diag) { P_marker[i2] = jj_count_diag; RAP_diag_data[jj_count_diag] = r_a_product * p_entry; RAP_diag_j[jj_count_diag] = i2; jj_count_diag++; } else RAP_diag_data[marker] += r_a_product * p_entry; } for (jj2 = P_ext_offd_i[i1]; jj2 < P_ext_offd_i[i1+1]; jj2++) { i2 = map_Pext_to_RAP[P_ext_offd_j[jj2]] + num_cols_diag_P; HYPRE_Real p_entry = P_ext_offd_data[jj2]; HYPRE_Int marker = P_marker[i2]; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i2} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. *--------------------------------------------------------*/ if (marker < jj_row_begin_offd) { P_marker[i2] = jj_count_offd; RAP_offd_data[jj_count_offd] = r_a_product * p_entry; RAP_offd_j[jj_count_offd] = i2 - num_cols_diag_P; jj_count_offd++; } else RAP_offd_data[marker] += r_a_product * p_entry; } } // loop over entries in row ic of RA_offd /*-------------------------------------------------------------------- * Loop over entries in row ic of RA_diag. *--------------------------------------------------------------------*/ for (jj1 = ra_row_begin_diag; jj1 < RA_diag.num_nonzeros; jj1++) { HYPRE_Int i1 = RA_diag.j[jj1 - ra_row_begin_diag]; HYPRE_Real r_a_product = RA_diag.data[jj1 - ra_row_begin_diag]; /*----------------------------------------------------------------- * Loop over entries in row i1 of P_diag. *-----------------------------------------------------------------*/ for (jj2 = P_diag_i[i1]; jj2 < P_diag_i[i1+1]; jj2++) { i2 = P_diag_j[jj2]; HYPRE_Real p_entry = P_diag_data[jj2]; HYPRE_Int marker = P_marker[i2]; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i2} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. *--------------------------------------------------------*/ if (marker < jj_row_begin_diag) { P_marker[i2] = jj_count_diag; RAP_diag_data[jj_count_diag] = r_a_product * p_entry; RAP_diag_j[jj_count_diag] = i2; jj_count_diag++; } else { RAP_diag_data[marker] += r_a_product * p_entry; } } if (num_cols_offd_P) { for (jj2 = P_offd_i[i1]; jj2 < P_offd_i[i1+1]; jj2++) { i2 = map_P_to_RAP[P_offd_j[jj2]] + num_cols_diag_P; HYPRE_Real p_entry = P_offd_data[jj2]; HYPRE_Int marker = P_marker[i2]; /*-------------------------------------------------------- * Check P_marker to see that RAP_{ic,i2} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. *--------------------------------------------------------*/ if (marker < jj_row_begin_offd) { P_marker[i2] = jj_count_offd; RAP_offd_data[jj_count_offd] = r_a_product * p_entry; RAP_offd_j[jj_count_offd] = i2 - num_cols_diag_P; jj_count_offd++; } else { RAP_offd_data[marker] += r_a_product * p_entry; } } } // num_cols_offd_P } // loop over entries in row ic of RA_diag. } // Loop over interior c-points. 
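/*
 * The cleanup that follows counts how many offd columns were actually used
 * by stamping P_marker with a first-touch test; under HYPRE_USING_ATOMIC a
 * compare-and-swap lets several threads scan RAP_offd_j concurrently while
 * still counting each column exactly once. A minimal model of that idiom
 * with a GCC-style builtin (an assumption about what hypre_compare_and_swap
 * wraps on typical builds; not hypre API):
 *
 *    HYPRE_Int used = 0;
 *    #pragma omp parallel for reduction(+:used)
 *    for (i = 0; i < nnz; i++)
 *       if (__sync_val_compare_and_swap(&mark[col[i]], -1, 0) == -1)
 *          used++;   // this thread was the first to touch column col[i]
 */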
hypre_TFree(P_mark_array[ii], HYPRE_MEMORY_HOST); hypre_TFree(A_mark_array[ii], HYPRE_MEMORY_HOST); } // omp parallel for /* check if really all off-diagonal entries occurring in col_map_offd_RAP are represented and eliminate if necessary */ P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_offd_RAP, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (i=0; i < num_cols_offd_RAP; i++) P_marker[i] = -1; jj_count_offd = 0; #ifdef HYPRE_USING_ATOMIC #pragma omp parallel for private(i3) reduction(+:jj_count_offd) HYPRE_SMP_SCHEDULE #endif for (i=0; i < RAP_offd_size; i++) { i3 = RAP_offd_j[i]; #ifdef HYPRE_USING_ATOMIC if (hypre_compare_and_swap(P_marker + i3, -1, 0) == -1) { jj_count_offd++; } #else if (P_marker[i3]) { P_marker[i3] = 0; jj_count_offd++; } #endif } if (jj_count_offd < num_cols_offd_RAP) { new_col_map_offd_RAP = hypre_CTAlloc(HYPRE_BigInt, jj_count_offd, HYPRE_MEMORY_HOST); jj_counter = 0; for (i=0; i < num_cols_offd_RAP; i++) if (!P_marker[i]) { P_marker[i] = jj_counter; new_col_map_offd_RAP[jj_counter++] = col_map_offd_RAP[i]; } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i3) HYPRE_SMP_SCHEDULE #endif for (i=0; i < RAP_offd_size; i++) { i3 = RAP_offd_j[i]; RAP_offd_j[i] = P_marker[i3]; } num_cols_offd_RAP = jj_count_offd; hypre_TFree(col_map_offd_RAP, HYPRE_MEMORY_HOST); col_map_offd_RAP = new_col_map_offd_RAP; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); RAP = hypre_ParCSRMatrixCreate(comm, n_coarse_RT, n_coarse, RT_partitioning, coarse_partitioning, num_cols_offd_RAP, RAP_diag_size, RAP_offd_size); /* Have RAP own coarse_partitioning instead of P */ hypre_ParCSRMatrixSetColStartsOwner(P,0); hypre_ParCSRMatrixSetColStartsOwner(RT,0); RAP_diag = hypre_ParCSRMatrixDiag(RAP); hypre_CSRMatrixI(RAP_diag) = RAP_diag_i; if (RAP_diag_size) { hypre_CSRMatrixData(RAP_diag) = RAP_diag_data; hypre_CSRMatrixJ(RAP_diag) = RAP_diag_j; } RAP_offd = hypre_ParCSRMatrixOffd(RAP); hypre_CSRMatrixI(RAP_offd) = RAP_offd_i; if (num_cols_offd_RAP) { hypre_CSRMatrixData(RAP_offd) = RAP_offd_data; hypre_CSRMatrixJ(RAP_offd) = RAP_offd_j; hypre_ParCSRMatrixColMapOffd(RAP) = col_map_offd_RAP; } if (num_procs > 1) { /* hypre_GenerateRAPCommPkg(RAP, A); */ hypre_MatvecCommPkgCreate(RAP); } *RAP_ptr = RAP; /*----------------------------------------------------------------------- * Free R, P_ext and marker arrays. 
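 *
 * Note the ownership handoff just above: RAP shares the coarse
 * partitioning arrays with P and RT, so the SetColStartsOwner(...,0)
 * calls strip ownership from P and RT; otherwise two destructors would
 * free the same array. Sketch of the hazard being avoided (hypothetical
 * flag name):
 *
 *    P->owns_col_starts = 0;          // RAP keeps and later frees it
 *    hypre_ParCSRMatrixDestroy(P);    // must NOT free col_starts now
 *    hypre_ParCSRMatrixDestroy(RAP);  // frees col_starts exactly once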
*-----------------------------------------------------------------------*/ if (keepTranspose) { hypre_ParCSRMatrixDiagT(RT) = R_diag; } else { hypre_CSRMatrixDestroy(R_diag); } R_diag = NULL; if (num_cols_offd_RT) { if (keepTranspose) { hypre_ParCSRMatrixOffdT(RT) = R_offd; } else { hypre_CSRMatrixDestroy(R_offd); } R_offd = NULL; } if (num_sends_RT || num_recvs_RT) { hypre_CSRMatrixDestroy(RAP_ext); RAP_ext = NULL; } hypre_TFree(P_mark_array, HYPRE_MEMORY_HOST); hypre_TFree(A_mark_array, HYPRE_MEMORY_HOST); hypre_TFree(P_ext_diag_i, HYPRE_MEMORY_HOST); hypre_TFree(P_ext_offd_i, HYPRE_MEMORY_HOST); hypre_TFree(jj_cnt_diag, HYPRE_MEMORY_HOST); hypre_TFree(jj_cnt_offd, HYPRE_MEMORY_HOST); if (num_cols_offd_P) { hypre_TFree(map_P_to_Pext, HYPRE_MEMORY_HOST); hypre_TFree(map_P_to_RAP, HYPRE_MEMORY_HOST); } if (num_cols_offd_Pext) { hypre_TFree(col_map_offd_Pext, HYPRE_MEMORY_HOST); hypre_TFree(map_Pext_to_RAP, HYPRE_MEMORY_HOST); } if (P_ext_diag_size) { hypre_TFree(P_ext_diag_data, HYPRE_MEMORY_HOST); hypre_TFree(P_ext_diag_j, HYPRE_MEMORY_HOST); } if (P_ext_offd_size) { hypre_TFree(P_ext_offd_data, HYPRE_MEMORY_HOST); hypre_TFree(P_ext_offd_j, HYPRE_MEMORY_HOST); } hypre_TFree(RA_diag_data_array, HYPRE_MEMORY_HOST); hypre_TFree(RA_diag_j_array, HYPRE_MEMORY_HOST); if (num_cols_offd_A) { hypre_TFree(RA_offd_data_array, HYPRE_MEMORY_HOST); hypre_TFree(RA_offd_j_array, HYPRE_MEMORY_HOST); } #ifdef HYPRE_CONCURRENT_HOPSCOTCH if (send_map_elmts_RT_inverse_map_initialized) { hypre_UnorderedIntMapDestroy(&send_map_elmts_RT_inverse_map); } hypre_TFree(send_map_elmts_starts_RT_aggregated, HYPRE_MEMORY_HOST); hypre_TFree(send_map_elmts_RT_aggregated, HYPRE_MEMORY_HOST); #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RAP] += hypre_MPI_Wtime(); #endif return(0); }
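/*
 * For reference, the diag/offd split assembled above follows the ParCSR
 * convention: diag holds the columns owned by this rank (stored with local
 * indices), offd holds the remaining entries with compressed indices that
 * col_map_offd translates back to global column ids. A minimal sketch of
 * how a consumer walks one row of that layout (hypothetical helper, not
 * the hypre API; assumes <stdio.h>):
 */
static void parcsr_row_print(HYPRE_Int row, HYPRE_BigInt first_col_diag,
                             const HYPRE_Int *diag_i, const HYPRE_Int *diag_j,
                             const HYPRE_Real *diag_data,
                             const HYPRE_Int *offd_i, const HYPRE_Int *offd_j,
                             const HYPRE_Real *offd_data,
                             const HYPRE_BigInt *col_map_offd)
{
   HYPRE_Int k;
   for (k = diag_i[row]; k < diag_i[row+1]; k++)   /* locally owned columns */
      printf("(%lld, %e) ", (long long)(first_col_diag + diag_j[k]),
             diag_data[k]);
   for (k = offd_i[row]; k < offd_i[row+1]; k++)   /* external columns */
      printf("(%lld, %e) ", (long long)col_map_offd[offd_j[k]],
             offd_data[k]);
   printf("\n");
}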
flux_avx512.c
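/*
 * Note on the update strategy used below: the AVX-512 residual loops
 * scatter-add into r[] and resolve duplicate node indices within a vector
 * with _mm512_conflict_epi32 -- lanes that collide on the same index are
 * deferred to a later trip of the do/while, so each address is written by
 * at most one lane per trip. A scalar model of that masked iteration over
 * 8 lanes (illustrative only, not the intrinsic semantics verbatim):
 *
 *    uint8_t next = active;                  // lanes still to be applied
 *    while (next) {
 *       uint8_t crt = 0;                     // conflict-free subset
 *       for (int l = 0; l < 8; l++)
 *          if ((next >> l) & 1) {
 *             int dup = 0;
 *             for (int m = 0; m < l; m++)    // any earlier active lane
 *                if (((next >> m) & 1) && node[m] == node[l]) dup = 1;
 *             if (!dup) crt |= (uint8_t)(1u << l);
 *          }
 *       for (int l = 0; l < 8; l++)          // safe: indices are distinct
 *          if ((crt >> l) & 1) r[node[l]] += contrib[l];
 *       next ^= crt;                         // retry the colliding lanes
 *    }
 */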
#include <stdio.h> #include <string.h> #include <stdint.h> #include <omp.h> #include <mathimf.h> #include <immintrin.h> #include <ktime.h> #include <geometry.h> #ifdef __USE_HW_COUNTER #include <perf.h> #include <kperf.h> #endif #include <phy.h> #define MAG0 (0.5 / 3) #define MAG1 (-MAG0) /* Calculates the residual */ void compute_residual(struct residual *restrict res) { #ifdef __USE_HW_COUNTER const struct fd fd = res->perf_counters->fd; struct counters start; perf_read(fd, &start); const uint64_t icycle = __rdtsc(); #endif struct ktime ktime; setktime(&ktime); const size_t bsz = res->bsz; const size_t nfnodes = res->nfnodes; const size_t dofs = res->dofs; const uint32_t snfc = res->snfc; const double pressure = res->pressure; const double velocity_u = res->velocity_u; const double velocity_v = res->velocity_v; const double velocity_w = res->velocity_w; const double *restrict f_xyz0 = res->f_xyz0; const double *restrict f_xyz1 = res->f_xyz1; const double *restrict f_xyz2 = res->f_xyz2; const double *restrict xyz0 = res->xyz0; const double *restrict xyz1 = res->xyz1; const double *restrict xyz2 = res->xyz2; const uint32_t *restrict ie = res->ie; const uint32_t *restrict part = res->part; const uint32_t *restrict snfic = res->snfic; const uint32_t *restrict n0 = res->n0; const uint32_t *restrict n1 = res->n1; const uint32_t *restrict nfptr = res->nfptr; const uint32_t *restrict sn0 = res->sn0; const uint32_t *restrict sn1 = res->sn1; const uint32_t *restrict sn2 = res->sn2; const double *restrict x0 = res->x0; const double *restrict x1 = res->x1; const double *restrict x2 = res->x2; const double *restrict x3 = res->x3; const double *restrict q = res->q; const double *restrict w0termsx = res->w0termsx; const double *restrict w0termsy = res->w0termsy; const double *restrict w0termsz = res->w0termsz; const double *restrict w1termsx = res->w1termsx; const double *restrict w1termsy = res->w1termsy; const double *restrict w1termsz = res->w1termsz; double *restrict gradx0 = res->gradx0; double *restrict gradx1 = res->gradx1; double *restrict gradx2 = res->gradx2; memset(gradx0, 0, dofs * sizeof(double)); memset(gradx1, 0, dofs * sizeof(double)); memset(gradx2, 0, dofs * sizeof(double)); double *restrict r = res->r; memset(r, 0, dofs * sizeof(double)); __assume_aligned(r, 64); /* Calculates the gradients at the nodes using weighted least squares This solves using Gram-Schmidt */ #pragma omp parallel { const uint32_t t = omp_get_thread_num(); const uint32_t ie0 = ie[t]; const uint32_t ie1 = ie[t+1]; uint32_t i; for(i = ie0; i < ie1; i++) { const uint32_t node0 = n0[i]; const uint32_t node1 = n1[i]; const uint32_t idx0 = bsz * node0; const uint32_t idx1 = bsz * node1; double dq; double termx; double termy; double termz; if(part[node0] == t) { termx = w0termsx[i]; termy = w0termsy[i]; termz = w0termsz[i]; dq = q[idx1 + 0] - q[idx0 + 0]; gradx0[idx0 + 0] += termx * dq; gradx1[idx0 + 0] += termy * dq; gradx2[idx0 + 0] += termz * dq; dq = q[idx1 + 1] - q[idx0 + 1]; gradx0[idx0 + 1] += termx * dq; gradx1[idx0 + 1] += termy * dq; gradx2[idx0 + 1] += termz * dq; dq = q[idx1 + 2] - q[idx0 + 2]; gradx0[idx0 + 2] += termx * dq; gradx1[idx0 + 2] += termy * dq; gradx2[idx0 + 2] += termz * dq; dq = q[idx1 + 3] - q[idx0 + 3]; gradx0[idx0 + 3] += termx * dq; gradx1[idx0 + 3] += termy * dq; gradx2[idx0 + 3] += termz * dq; } if(part[node1] == t) { termx = w1termsx[i]; termy = w1termsy[i]; termz = w1termsz[i]; dq = q[idx0 + 0] - q[idx1 + 0]; gradx0[idx1 + 0] += termx * dq; gradx1[idx1 + 0] += termy * dq; gradx2[idx1 
+ 0] += termz * dq; dq = q[idx0 + 1] - q[idx1 + 1]; gradx0[idx1 + 1] += termx * dq; gradx1[idx1 + 1] += termy * dq; gradx2[idx1 + 1] += termz * dq; dq = q[idx0 + 2] - q[idx1 + 2]; gradx0[idx1 + 2] += termx * dq; gradx1[idx1 + 2] += termy * dq; gradx2[idx1 + 2] += termz * dq; dq = q[idx0 + 3] - q[idx1 + 3]; gradx0[idx1 + 3] += termx * dq; gradx1[idx1 + 3] += termy * dq; gradx2[idx1 + 3] += termz * dq; } } } /* Calculates the fluxes on the face and performs the flux balance */ /* AVX512 Registers */ const __m512d _zero = _mm512_set1_pd(0); const __m512d _pos1 = _mm512_set1_pd(1.0); const __m512d _pos2 = _mm512_set1_pd(2.0); const __m512d _half = _mm512_set1_pd(0.5); const __m512d _nhalf = _mm512_set1_pd(-0.5); const __m512d _nu95 = _mm512_set1_pd(0.95); const __m512d _beta = _mm512_set1_pd(BETA); #ifdef __USE_SKX const __m512d _rbeta = _mm512_rcp14_pd(_beta); #else const __m512d _rbeta = _mm512_rcp28_pd(_beta); #endif const __m256i _bsz = _mm256_set1_epi32(bsz); const __m256i _shift1 = _mm256_set1_epi32(1); const __m256i _shift2 = _mm256_set1_epi32(2); const __m256i _shift3 = _mm256_set1_epi32(3); const __m512i _ng = _mm512_set1_epi32(-1); const __m512d _und = _mm512_undefined_pd(); #pragma omp parallel { const uint32_t t = omp_get_thread_num(); const uint32_t ie0 = ie[t]; const uint32_t ie1 = ie[t+1]; const uint32_t lim = ie1 - ((ie1-ie0) % 8); const __m512i _t = _mm512_set1_epi32(t); uint32_t i; for(i = ie0; i < lim; i+=8) { const __m512d _xn = _mm512_load_pd((void const *) &x0[i]); const __m512d _yn = _mm512_load_pd((void const *) &x1[i]); const __m512d _zn = _mm512_load_pd((void const *) &x2[i]); const __m512d _ln = _mm512_load_pd((void const *) &x3[i]); /* Now lets get our other 2 vectors For first vector, use {1,0,0} and subtract off the component in the direction of the face normal. 
If the inner product of {1,0,0} is close to unity, use {0,1,0} */ const __m512d _fdot = _mm512_abs_pd(_xn); __mmask _k0; __m512d _dot, _X1, _Y1, _Z1; _k0 = _mm512_cmp_pd_mask(_fdot, _nu95, _CMP_LT_OS); _X1 = _mm512_mask_fnmadd_pd(_xn, _k0, _xn, _pos1); _Y1 = _mm512_mask_fnmadd_pd(_yn, _k0, _xn, _zero); _Z1 = _mm512_mask_fnmadd_pd(_zn, _k0, _xn, _zero); _k0 = _mm512_cmp_pd_mask(_fdot, _nu95, _CMP_GE_OS); _X1 = _mm512_mask_fnmadd_pd(_X1, _k0, _yn, _zero); _Y1 = _mm512_mask_fnmadd_pd(_Y1, _k0, _yn, _pos1); _Z1 = _mm512_mask_fnmadd_pd(_Z1, _k0, _yn, _zero); /* Normalize the first vector */ __m512d _size; _size = _mm512_mul_pd(_X1, _X1); _size = _mm512_fmadd_pd(_Y1, _Y1, _size); _size = _mm512_fmadd_pd(_Z1, _Z1, _size); #ifdef __USE_SKX _size = _mm512_rsqrt14_pd(_size); #else _size = _mm512_rsqrt28_pd(_size); #endif _X1 = _mm512_mul_pd(_X1, _size); _Y1 = _mm512_mul_pd(_Y1, _size); _Z1 = _mm512_mul_pd(_Z1, _size); const __m256i _n0 = _mm256_load_si256((__m256i const *) &n0[i]); const __m256i _n1 = _mm256_load_si256((__m256i const *) &n1[i]); const __m512d _x00 = _mm512_i32gather_pd(_n0, &xyz0[0], 8); const __m512d _x01 = _mm512_i32gather_pd(_n0, &xyz1[0], 8); const __m512d _x02 = _mm512_i32gather_pd(_n0, &xyz2[0], 8); const __m512d _x10 = _mm512_i32gather_pd(_n1, &xyz0[0], 8); const __m512d _x11 = _mm512_i32gather_pd(_n1, &xyz1[0], 8); const __m512d _x12 = _mm512_i32gather_pd(_n1, &xyz2[0], 8); const __m512d _xmean = _mm512_mul_pd(_half, _mm512_add_pd(_x00, _x10)); const __m512d _ymean = _mm512_mul_pd(_half, _mm512_add_pd(_x01, _x11)); const __m512d _zmean = _mm512_mul_pd(_half, _mm512_add_pd(_x02, _x12)); /* Take cross-product of normal and V1 to get V2 */ const __m512d _X2 = _mm512_fmsub_pd(_yn, _Z1, _mm512_mul_pd(_zn, _Y1)); const __m512d _Y2 = _mm512_fmsub_pd(_zn, _X1, _mm512_mul_pd(_xn, _Z1)); const __m512d _Z2 = _mm512_fmsub_pd(_xn, _Y1, _mm512_mul_pd(_yn, _X1)); /* Compute the stride indices */ const __m256i _idx0 = _mm256_mullo_epi32(_bsz, _n0); const __m256i _idx1 = _mm256_mullo_epi32(_bsz, _n1); const __m256i _idx01 = _mm256_add_epi32(_idx0, _shift1); const __m256i _idx11 = _mm256_add_epi32(_idx1, _shift1); const __m256i _idx02 = _mm256_add_epi32(_idx0, _shift2); const __m256i _idx12 = _mm256_add_epi32(_idx1, _shift2); const __m256i _idx03 = _mm256_add_epi32(_idx0, _shift3); const __m256i _idx13 = _mm256_add_epi32(_idx1, _shift3); /* Get variables on "left" and "right" side of face */ __m512d _q; __m512d _ubarL, _ubarR; __m512d _rx, _ry, _rz; __m512d _g0, _g1, _g2; __m512d _pL, _uL, _vL, _wL; __m512d _pR, _uR, _vR, _wR; /* Left */ _rx = _mm512_sub_pd(_xmean, _x00); _ry = _mm512_sub_pd(_ymean, _x01); _rz = _mm512_sub_pd(_zmean, _x02); /* Pressure */ _g0 = _mm512_i32gather_pd(_idx0, &gradx0[0], 8); _g1 = _mm512_i32gather_pd(_idx0, &gradx1[0], 8); _g2 = _mm512_i32gather_pd(_idx0, &gradx2[0], 8); _q = _mm512_i32gather_pd(_idx0, &q[0], 8); _pL = _mm512_fmadd_pd(_g0, _rx, _q); _pL = _mm512_fmadd_pd(_g1, _ry, _pL); _pL = _mm512_fmadd_pd(_g2, _rz, _pL); /* Velocity u */ _g0 = _mm512_i32gather_pd(_idx01, &gradx0[0], 8); _g1 = _mm512_i32gather_pd(_idx01, &gradx1[0], 8); _g2 = _mm512_i32gather_pd(_idx01, &gradx2[0], 8); _q = _mm512_i32gather_pd(_idx01, &q[0], 8); _uL = _mm512_fmadd_pd(_g0, _rx, _q); _uL = _mm512_fmadd_pd(_g1, _ry, _uL); _uL = _mm512_fmadd_pd(_g2, _rz, _uL); /* Velocity v */ _g0 = _mm512_i32gather_pd(_idx02, &gradx0[0], 8); _g1 = _mm512_i32gather_pd(_idx02, &gradx1[0], 8); _g2 = _mm512_i32gather_pd(_idx02, &gradx2[0], 8); _q = _mm512_i32gather_pd(_idx02, &q[0], 8); _vL = 
_mm512_fmadd_pd(_g0, _rx, _q); _vL = _mm512_fmadd_pd(_g1, _ry, _vL); _vL = _mm512_fmadd_pd(_g2, _rz, _vL); /* Velocity w */ _g0 = _mm512_i32gather_pd(_idx03, &gradx0[0], 8); _g1 = _mm512_i32gather_pd(_idx03, &gradx1[0], 8); _g2 = _mm512_i32gather_pd(_idx03, &gradx2[0], 8); _q = _mm512_i32gather_pd(_idx03, &q[0], 8); _wL = _mm512_fmadd_pd(_g0, _rx, _q); _wL = _mm512_fmadd_pd(_g1, _ry, _wL); _wL = _mm512_fmadd_pd(_g2, _rz, _wL); _ubarL = _mm512_mul_pd(_xn, _uL); _ubarL = _mm512_fmadd_pd(_yn, _vL, _ubarL); _ubarL = _mm512_fmadd_pd(_zn, _wL, _ubarL); /* Right */ _rx = _mm512_sub_pd(_xmean, _x10); _ry = _mm512_sub_pd(_ymean, _x11); _rz = _mm512_sub_pd(_zmean, _x12); /* Pressure */ _g0 = _mm512_i32gather_pd(_idx1, &gradx0[0], 8); _g1 = _mm512_i32gather_pd(_idx1, &gradx1[0], 8); _g2 = _mm512_i32gather_pd(_idx1, &gradx2[0], 8); _q = _mm512_i32gather_pd(_idx1, &q[0], 8); _pR = _mm512_fmadd_pd(_g0, _rx, _q); _pR = _mm512_fmadd_pd(_g1, _ry, _pR); _pR = _mm512_fmadd_pd(_g2, _rz, _pR); /* Velocity u */ _g0 = _mm512_i32gather_pd(_idx11, &gradx0[0], 8); _g1 = _mm512_i32gather_pd(_idx11, &gradx1[0], 8); _g2 = _mm512_i32gather_pd(_idx11, &gradx2[0], 8); _q = _mm512_i32gather_pd(_idx11, &q[0], 8); _uR = _mm512_fmadd_pd(_g0, _rx, _q); _uR = _mm512_fmadd_pd(_g1, _ry, _uR); _uR = _mm512_fmadd_pd(_g2, _rz, _uR); /* Velocity v */ _g0 = _mm512_i32gather_pd(_idx12, &gradx0[0], 8); _g1 = _mm512_i32gather_pd(_idx12, &gradx1[0], 8); _g2 = _mm512_i32gather_pd(_idx12, &gradx2[0], 8); _q = _mm512_i32gather_pd(_idx12, &q[0], 8); _vR = _mm512_fmadd_pd(_g0, _rx, _q); _vR = _mm512_fmadd_pd(_g1, _ry, _vR); _vR = _mm512_fmadd_pd(_g2, _rz, _vR); /* Velocity w */ _g0 = _mm512_i32gather_pd(_idx13, &gradx0[0], 8); _g1 = _mm512_i32gather_pd(_idx13, &gradx1[0], 8); _g2 = _mm512_i32gather_pd(_idx13, &gradx2[0], 8); _q = _mm512_i32gather_pd(_idx13, &q[0], 8); _wR = _mm512_fmadd_pd(_g0, _rx, _q); _wR = _mm512_fmadd_pd(_g1, _ry, _wR); _wR = _mm512_fmadd_pd(_g2, _rz, _wR); _ubarR = _mm512_mul_pd(_xn, _uR); _ubarR = _mm512_fmadd_pd(_yn, _vR, _ubarR); _ubarR = _mm512_fmadd_pd(_zn, _wR, _ubarR); const __m512d _dp = _mm512_sub_pd(_pR, _pL); const __m512d _du = _mm512_sub_pd(_uR, _uL); const __m512d _dv = _mm512_sub_pd(_vR, _vL); const __m512d _dw = _mm512_sub_pd(_wR, _wL); /* Compute averages for velocity variables only */ const __m512d _u = _mm512_mul_pd(_half, _mm512_add_pd(_uL, _uR)); const __m512d _v = _mm512_mul_pd(_half, _mm512_add_pd(_vL, _vR)); const __m512d _w = _mm512_mul_pd(_half, _mm512_add_pd(_wL, _wR)); __m512d _ubar; _ubar = _mm512_mul_pd(_xn, _u); _ubar = _mm512_fmadd_pd(_yn, _v, _ubar); _ubar = _mm512_fmadd_pd(_zn, _w, _ubar); /* Compute Phi's */ __m512d _phi1; _phi1 = _mm512_mul_pd(_xn, _beta); _phi1 = _mm512_fmadd_pd(_u, _ubar, _phi1); __m512d _phi2; _phi2 = _mm512_mul_pd(_yn, _beta); _phi2 = _mm512_fmadd_pd(_v, _ubar, _phi2); __m512d _phi3; _phi3 = _mm512_mul_pd(_zn, _beta); _phi3 = _mm512_fmadd_pd(_w, _ubar, _phi3); __m512d _phi4; _phi4 = _mm512_mul_pd(_Z2, _phi2); _phi4 = _mm512_fmsub_pd(_Y2, _phi3, _phi4); __m512d _phi5; _phi5 = _mm512_mul_pd(_X2, _phi3); _phi5 = _mm512_fmsub_pd(_Z2, _phi1, _phi5); __m512d _phi6; _phi6 = _mm512_mul_pd(_Y2, _phi1); _phi6 = _mm512_fmsub_pd(_X2, _phi2, _phi6); __m512d _phi7; _phi7 = _mm512_mul_pd(_Y1, _phi3); _phi7 = _mm512_fmsub_pd(_Z1, _phi2, _phi7); __m512d _phi8; _phi8 = _mm512_mul_pd(_Z1, _phi1); _phi8 = _mm512_fmsub_pd(_X1, _phi3, _phi8); __m512d _phi9; _phi9 = _mm512_mul_pd(_X1, _phi2); _phi9 = _mm512_fmsub_pd(_Y1, _phi1, _phi9); /* Compute eigenvalues, eigenvectors, and strengths 
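
      For this artificial-compressibility system the face-normal flux
      Jacobian has eigenvalues (with ubar = n . u and BETA the artificial
      compressibility parameter; stated here for clarity, and matching
      the _c2/_bac/_bsc algebra that follows):

         c = sqrt(ubar^2 + BETA),  lambda = { ubar, ubar, ubar + c, ubar - c }

      so _eig1/_eig2/_eig3 below are the |lambda| factors of the Roe-style
      dissipation T |Lambda| T^{-1} (dp, du, dv, dw) assembled in _t1.._t4,
      using the tangent basis (X1,Y1,Z1), (X2,Y2,Z2) constructed above.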
*/ const __m512d _c2 = _mm512_fmadd_pd(_ubar, _ubar, _beta); #ifdef __USE_SKX const __m512d _c = _mm512_mul_pd(_mm512_rsqrt14_pd(_c2), _c2); const __m512d _c2r = _mm512_rcp14_pd(_c2); #else const __m512d _c = _mm512_mul_pd(_mm512_rsqrt28_pd(_c2), _c2); const __m512d _c2r = _mm512_rcp28_pd(_c2); #endif const __m512d _bac = _mm512_add_pd(_ubar, _c); const __m512d _bsc = _mm512_sub_pd(_ubar, _c); /* Components of T(inverse) */ __m512d _ti11; _ti11 = _mm512_mul_pd(_u, _phi4); _ti11 = _mm512_fmadd_pd(_v, _phi5, _ti11); _ti11 = _mm512_fmadd_pd(_w, _phi6, _ti11); _ti11 = _mm512_fnmadd_pd(_ti11, _rbeta, _zero); __m512d _ti21; _ti21 = _mm512_mul_pd(_u, _phi7); _ti21 = _mm512_fmadd_pd(_v, _phi8, _ti21); _ti21 = _mm512_fmadd_pd(_w, _phi9, _ti21); _ti21 = _mm512_fnmadd_pd(_ti21, _rbeta, _zero); __m512d _ti31; _ti31 = _mm512_mul_pd(_half, _mm512_sub_pd(_c, _ubar)); _ti31 = _mm512_mul_pd(_ti31, _rbeta); __m512d _ti41; _ti41 = _mm512_mul_pd(_nhalf, _bac); _ti41 = _mm512_mul_pd(_ti41, _rbeta); /* jumps (T(inverse) * dq) */ __m512d _dv1; _dv1 = _mm512_mul_pd(_ti11, _dp); _dv1 = _mm512_fmadd_pd(_phi4, _du, _dv1); _dv1 = _mm512_fmadd_pd(_phi5, _dv, _dv1); _dv1 = _mm512_fmadd_pd(_phi6, _dw, _dv1); _dv1 = _mm512_mul_pd(_dv1, _c2r); __m512d _dv2; _dv2 = _mm512_mul_pd(_ti21, _dp); _dv2 = _mm512_fmadd_pd(_phi7, _du, _dv2); _dv2 = _mm512_fmadd_pd(_phi8, _dv, _dv2); _dv2 = _mm512_fmadd_pd(_phi9, _dw, _dv2); _dv2 = _mm512_mul_pd(_dv2, _c2r); __m512d _dv34; _dv34 = _mm512_mul_pd(_xn, _du); _dv34 = _mm512_fmadd_pd(_yn, _dv, _dv34); _dv34 = _mm512_fmadd_pd(_zn, _dw, _dv34); __m512d _dv3; _dv3 = _mm512_fmadd_pd(_mm512_mul_pd(_pos2, _ti31), _dp, _dv34); _dv3 = _mm512_mul_pd(_dv3, _mm512_mul_pd(_half, _c2r)); __m512d _dv4; _dv4 = _mm512_fmadd_pd(_mm512_mul_pd(_pos2, _ti41), _dp, _dv34); _dv4 = _mm512_mul_pd(_dv4, _mm512_mul_pd(_half, _c2r)); /* Now get elements of T */ const __m512d _r13 = _mm512_mul_pd(_c, _beta); __m512d _r23; _r23 = _mm512_mul_pd(_u, _bac); _r23 = _mm512_fmadd_pd(_xn, _beta, _r23); __m512d _r33; _r33 = _mm512_mul_pd(_v, _bac); _r33 = _mm512_fmadd_pd(_yn, _beta, _r33); __m512d _r43; _r43 = _mm512_mul_pd(_w, _bac); _r43 = _mm512_fmadd_pd(_zn, _beta, _r43); const __m512d _r14 = _mm512_fnmadd_pd(_c, _beta, _zero); __m512d _r24; _r24 = _mm512_mul_pd(_u, _bsc); _r24 = _mm512_fmadd_pd(_xn, _beta, _r24); __m512d _r34; _r34 = _mm512_mul_pd(_v, _bsc); _r34 = _mm512_fmadd_pd(_yn, _beta, _r34); __m512d _r44; _r44 = _mm512_mul_pd(_w, _bsc); _r44 = _mm512_fmadd_pd(_zn, _beta, _r44); /* Calculate T* |lambda| * T(inverse) */ const __m512d _eig1 = _mm512_abs_pd(_ubar); const __m512d _eig2 = _mm512_abs_pd(_bac); const __m512d _eig3 = _mm512_abs_pd(_bsc); __m512d _t1; _t1 = _mm512_mul_pd(_mm512_mul_pd(_eig2, _r13), _dv3); _t1 = _mm512_fmadd_pd(_mm512_mul_pd(_eig3, _r14), _dv4, _t1); __m512d _t2; _t2 = _mm512_mul_pd(_mm512_mul_pd(_eig1, _X1), _dv1); _t2 = _mm512_fmadd_pd(_mm512_mul_pd(_eig1, _X2), _dv2, _t2); _t2 = _mm512_fmadd_pd(_mm512_mul_pd(_eig2, _r23), _dv3, _t2); _t2 = _mm512_fmadd_pd(_mm512_mul_pd(_eig3, _r24), _dv4, _t2); __m512d _t3; _t3 = _mm512_mul_pd(_mm512_mul_pd(_eig1, _Y1), _dv1); _t3 = _mm512_fmadd_pd(_mm512_mul_pd(_eig1, _Y2), _dv2, _t3); _t3 = _mm512_fmadd_pd(_mm512_mul_pd(_eig2, _r33), _dv3, _t3); _t3 = _mm512_fmadd_pd(_mm512_mul_pd(_eig3, _r34), _dv4, _t3); __m512d _t4; _t4 = _mm512_mul_pd(_mm512_mul_pd(_eig1, _Z1), _dv1); _t4 = _mm512_fmadd_pd(_mm512_mul_pd(_eig1, _Z2), _dv2, _t4); _t4 = _mm512_fmadd_pd(_mm512_mul_pd(_eig2, _r43), _dv3, _t4); _t4 = _mm512_fmadd_pd(_mm512_mul_pd(_eig3, _r44), _dv4, 
_t4); /* Modify to calculate .5(fl +fr) from nodes instead of extrapolated ones */ /* Left Side */ __m512d _fluxp1; _fluxp1 = _mm512_mul_pd(_mm512_mul_pd(_ln, _beta), _ubarL); __m512d _fluxp2; _fluxp2 = _mm512_mul_pd(_uL, _ubarL); _fluxp2 = _mm512_fmadd_pd(_xn, _pL, _fluxp2); _fluxp2 = _mm512_mul_pd(_ln, _fluxp2); __m512d _fluxp3; _fluxp3 = _mm512_mul_pd(_vL, _ubarL); _fluxp3 = _mm512_fmadd_pd(_yn, _pL, _fluxp3); _fluxp3 = _mm512_mul_pd(_ln, _fluxp3); __m512d _fluxp4; _fluxp4 = _mm512_mul_pd(_wL, _ubarL); _fluxp4 = _mm512_fmadd_pd(_zn, _pL, _fluxp4); _fluxp4 = _mm512_mul_pd(_ln, _fluxp4); /* Right Side */ __m512d _fluxm1; _fluxm1 = _mm512_mul_pd(_mm512_mul_pd(_ln, _beta), _ubarR); __m512d _fluxm2; _fluxm2 = _mm512_mul_pd(_uR, _ubarR); _fluxm2 = _mm512_fmadd_pd(_xn, _pR, _fluxm2); _fluxm2 = _mm512_mul_pd(_ln, _fluxm2); __m512d _fluxm3; _fluxm3 = _mm512_mul_pd(_vR, _ubarR); _fluxm3 = _mm512_fmadd_pd(_yn, _pR, _fluxm3); _fluxm3 = _mm512_mul_pd(_ln, _fluxm3); __m512d _fluxm4; _fluxm4 = _mm512_mul_pd(_wR, _ubarR); _fluxm4 = _mm512_fmadd_pd(_zn, _pR, _fluxm4); _fluxm4 = _mm512_mul_pd(_ln, _fluxm4); __m512d _res1; _res1 = _mm512_fnmadd_pd(_ln, _t1, _mm512_add_pd(_fluxm1, _fluxp1)); __m512d _res2; _res2 = _mm512_fnmadd_pd(_ln, _t2, _mm512_add_pd(_fluxm2, _fluxp2)); __m512d _res3; _res3 = _mm512_fnmadd_pd(_ln, _t3, _mm512_add_pd(_fluxm3, _fluxp3)); __m512d _res4; _res4 = _mm512_fnmadd_pd(_ln, _t4, _mm512_add_pd(_fluxm4, _fluxp4)); /* Update the residual */ __m512i _node, _part; __mmask _next; _node = _mm512_castsi256_si512(_n0); _part = _mm512_i32gather_epi32(_node, &part[0], 4); _next = _mm512_cmpeq_epi32_mask(_part, _t); /* Conflict detection instructions with multiple node update */ /* Node 0 Contributions */ do { __m512i _cd, _bnext; __m512d _v, _d; __mmask _crt; _cd = _mm512_mask_conflict_epi32(_ng, _next, _node); _bnext = _mm512_broadcastmw_epi32(_next); _crt = _mm512_mask_testn_epi32_mask(_next, _cd, _bnext); _v = _mm512_mask_i32gather_pd(_und, _crt, _idx0, &r[0], 8); _d = _mm512_mask_fmadd_pd(_res1, _crt, _half, _v); _mm512_mask_i32scatter_pd(&r[0], _crt, _idx0, _d, 8); _v = _mm512_mask_i32gather_pd(_und, _crt, _idx01, &r[0], 8); _d = _mm512_mask_fmadd_pd(_res2, _crt, _half, _v); _mm512_mask_i32scatter_pd(&r[0], _crt, _idx01, _d, 8); _v = _mm512_mask_i32gather_pd(_und, _crt, _idx02, &r[0], 8); _d = _mm512_mask_fmadd_pd(_res3, _crt, _half, _v); _mm512_mask_i32scatter_pd(&r[0], _crt, _idx02, _d, 8); _v = _mm512_mask_i32gather_pd(_und, _crt, _idx03, &r[0], 8); _d = _mm512_mask_fmadd_pd(_res4, _crt, _half, _v); _mm512_mask_i32scatter_pd(&r[0], _crt, _idx03, _d, 8); _next = _mm512_kxor(_next, _crt); } while(_next); _node = _mm512_castsi256_si512(_n1); _part = _mm512_i32gather_epi32(_node, &part[0], 4); _next = _mm512_cmpeq_epi32_mask(_part, _t); /* Node 1 Contributions */ do { __m512i _cd, _bnext; __m512d _v, _d; __mmask _crt; _cd = _mm512_mask_conflict_epi32(_ng, _next, _node); _bnext = _mm512_broadcastmw_epi32(_next); _crt = _mm512_mask_testn_epi32_mask(_next, _cd, _bnext); _v = _mm512_mask_i32gather_pd(_und, _crt, _idx1, &r[0], 8); _d = _mm512_mask_fnmadd_pd(_res1, _crt, _half, _v); _mm512_mask_i32scatter_pd(&r[0], _crt, _idx1, _d, 8); _v = _mm512_mask_i32gather_pd(_und, _crt, _idx11, &r[0], 8); _d = _mm512_mask_fnmadd_pd(_res2, _crt, _half, _v); _mm512_mask_i32scatter_pd(&r[0], _crt, _idx11, _d, 8); _v = _mm512_mask_i32gather_pd(_und, _crt, _idx12, &r[0], 8); _d = _mm512_mask_fnmadd_pd(_res3, _crt, _half, _v); _mm512_mask_i32scatter_pd(&r[0], _crt, _idx12, _d, 8); _v = 
_mm512_mask_i32gather_pd(_und, _crt, _idx13, &r[0], 8); _d = _mm512_mask_fnmadd_pd(_res4, _crt, _half, _v); _mm512_mask_i32scatter_pd(&r[0], _crt, _idx13, _d, 8); _next = _mm512_kxor(_next, _crt); } while(_next); } /* Remainder loop */ for(i = lim; i < ie1; i++) { const uint32_t node0 = n0[i]; const uint32_t node1 = n1[i]; const double xn = x0[i]; const double yn = x1[i]; const double zn = x2[i]; const double ln = x3[i]; const double xmean = 0.5f * (xyz0[node0] + xyz0[node1]); const double ymean = 0.5f * (xyz1[node0] + xyz1[node1]); const double zmean = 0.5f * (xyz2[node0] + xyz2[node1]); /* Now lets get our other 2 vectors For first vector, use {1,0,0} and subtract off the component in the direction of the face normal. If the inner product of {1,0,0} is close to unity, use {0,1,0} */ double X1 = (fabs(xn) < 0.95) ? (1 - xn * xn) : (- yn * xn); double Y1 = (fabs(xn) < 0.95) ? (- xn * yn) : (1 - yn * yn); double Z1 = (fabs(xn) < 0.95) ? (- xn * zn) : (- yn * zn); /* Normalize the first vector */ double size = X1 * X1; size += Y1 * Y1; size += Z1 * Z1; size = sqrt(size); X1 /= size; Y1 /= size; Z1 /= size; /* Take cross-product of normal and V1 to get V2 */ const double X2 = yn * Z1 - zn * Y1; const double Y2 = zn * X1 - xn * Z1; const double Z2 = xn * Y1 - yn * X1; /* Get variables on "left" and "right" side of face */ double rx = xmean - xyz0[node0]; double ry = ymean - xyz1[node0]; double rz = zmean - xyz2[node0]; const uint32_t idx0 = bsz * node0; const uint32_t idx1 = bsz * node1; // Pressure double pL = q[idx0 + 0] + gradx0[idx0 + 0] * rx; pL += gradx1[idx0 + 0] * ry; pL += gradx2[idx0 + 0] * rz; // Velocity u double uL = q[idx0 + 1] + gradx0[idx0 + 1] * rx; uL += gradx1[idx0 + 1] * ry; uL += gradx2[idx0 + 1] * rz; // Velocity v double vL = q[idx0 + 2] + gradx0[idx0 + 2] * rx; vL += gradx1[idx0 + 2] * ry; vL += gradx2[idx0 + 2] * rz; // Velocity w double wL = q[idx0 + 3] + gradx0[idx0 + 3] * rx; wL += gradx1[idx0 + 3] * ry; wL += gradx2[idx0 + 3] * rz; double ubarL = xn * uL; ubarL += yn * vL; ubarL += zn * wL; rx = xmean - xyz0[node1]; ry = ymean - xyz1[node1]; rz = zmean - xyz2[node1]; // Pressure double pR = q[idx1 + 0] + gradx0[idx1 + 0] * rx; pR += gradx1[idx1 + 0] * ry; pR += gradx2[idx1 + 0] * rz; // Velocity u double uR = q[idx1 + 1] + gradx0[idx1 + 1] * rx; uR += gradx1[idx1 + 1] * ry; uR += gradx2[idx1 + 1] * rz; // Velocity v double vR = q[idx1 + 2] + gradx0[idx1 + 2] * rx; vR += gradx1[idx1 + 2] * ry; vR += gradx2[idx1 + 2] * rz; // Velocity w double wR = q[idx1 + 3] + gradx0[idx1 + 3] * rx; wR += gradx1[idx1 + 3] * ry; wR += gradx2[idx1 + 3] * rz; double ubarR = xn * uR; ubarR += yn * vR; ubarR += zn * wR; /* Compute averages */ const double u = 0.5f * (uL + uR); const double v = 0.5f * (vL + vR); const double w = 0.5f * (wL + wR); double ubar = xn * u; ubar += yn * v; ubar += zn * w; double phi1 = xn * BETA; phi1 += u * ubar; double phi2 = yn * BETA; phi2 += v * ubar; double phi3 = zn * BETA; phi3 += w * ubar; double phi4 = Y2 * phi3; phi4 -= Z2 * phi2; double phi5 = Z2 * phi1; phi5 -= X2 * phi3; double phi6 = X2 * phi2; phi6 -= Y2 * phi1; double phi7 = Z1 * phi2; phi7 -= Y1 * phi3; double phi8 = X1 * phi3; phi8 -= Z1 * phi1; double phi9 = Y1 * phi1; phi9 -= X1 * phi2; double c2 = ubar * ubar + BETA; double c = sqrt(c2); /* Now compute eigenvalues, eigenvectors, and strengths */ const double uac = ubar + c; const double usc = ubar - c; const double eig1 = fabs(ubar); const double eig2 = fabs(uac); const double eig3 = fabs(usc); const double dp = pR - pL; const double du = 
      const double dp = pR - pL;
      const double du = uR - uL;
      const double dv = vR - vL;
      const double dw = wR - wL;

      /* Components of T(inverse) */
      double ti11 = u * phi4;
      ti11 += v * phi5;
      ti11 += w * phi6;
      ti11 = -ti11 / BETA;
      double ti21 = u * phi7;
      ti21 += v * phi8;
      ti21 += w * phi9;
      ti21 = -ti21 / BETA;
      double ti31 = 0.5f * (c - ubar);
      ti31 /= BETA;
      double ti41 = -0.5f * uac;
      ti41 /= BETA;

      /* jumps (T(inverse) * dq) */
      double dv1 = ti11 * dp;
      dv1 += phi4 * du;
      dv1 += phi5 * dv;
      dv1 += phi6 * dw;
      dv1 /= c2;
      double dv2 = ti21 * dp;
      dv2 += phi7 * du;
      dv2 += phi8 * dv;
      dv2 += phi9 * dw;
      dv2 /= c2;
      double dv3 = 2.f * ti31 * dp;
      dv3 += xn * du;
      dv3 += yn * dv;
      dv3 += zn * dw;
      dv3 *= 0.5f / c2;
      double dv4 = 2.f * ti41 * dp;
      dv4 += xn * du;
      dv4 += yn * dv;
      dv4 += zn * dw;
      dv4 *= 0.5f / c2;

      /* Now get elements of T */
      const double r13 = c * BETA;
      const double r23 = u * uac + xn * BETA;
      const double r33 = v * uac + yn * BETA;
      const double r43 = w * uac + zn * BETA;

      const double r14 = -c * BETA;
      const double r24 = u * usc + xn * BETA;
      const double r34 = v * usc + yn * BETA;
      const double r44 = w * usc + zn * BETA;

      /* Calculate T * |lambda| * T(inverse) */
      double t1 = eig2 * r13 * dv3 + eig3 * r14 * dv4;
      double t2 = eig1 * X1 * dv1 + eig1 * X2 * dv2;
      t2 += eig2 * r23 * dv3 + eig3 * r24 * dv4;
      double t3 = eig1 * Y1 * dv1 + eig1 * Y2 * dv2;
      t3 += eig2 * r33 * dv3 + eig3 * r34 * dv4;
      double t4 = eig1 * Z1 * dv1 + eig1 * Z2 * dv2;
      t4 += eig2 * r43 * dv3 + eig3 * r44 * dv4;

      /* Modify to calculate 0.5*(fL + fR) from the nodal values instead
         of the extrapolated ones */
      const double fluxp1 = ln * BETA * ubarL;
      const double fluxp2 = ln * (uL * ubarL + xn * pL);
      const double fluxp3 = ln * (vL * ubarL + yn * pL);
      const double fluxp4 = ln * (wL * ubarL + zn * pL);

      /* Now the right side */
      const double fluxm1 = ln * BETA * ubarR;
      const double fluxm2 = ln * (uR * ubarR + xn * pR);
      const double fluxm3 = ln * (vR * ubarR + yn * pR);
      const double fluxm4 = ln * (wR * ubarR + zn * pR);

      const double res1 = 0.5f * (fluxp1 + fluxm1 - ln * t1);
      const double res2 = 0.5f * (fluxp2 + fluxm2 - ln * t2);
      const double res3 = 0.5f * (fluxp3 + fluxm3 - ln * t3);
      const double res4 = 0.5f * (fluxp4 + fluxm4 - ln * t4);

      r[idx0 + 0] = (part[node0] == t) ? (r[idx0 + 0] + res1) : r[idx0 + 0];
      r[idx0 + 1] = (part[node0] == t) ? (r[idx0 + 1] + res2) : r[idx0 + 1];
      r[idx0 + 2] = (part[node0] == t) ? (r[idx0 + 2] + res3) : r[idx0 + 2];
      r[idx0 + 3] = (part[node0] == t) ? (r[idx0 + 3] + res4) : r[idx0 + 3];

      r[idx1 + 0] = (part[node1] == t) ? (r[idx1 + 0] - res1) : r[idx1 + 0];
      r[idx1 + 1] = (part[node1] == t) ? (r[idx1 + 1] - res2) : r[idx1 + 1];
      r[idx1 + 2] = (part[node1] == t) ? (r[idx1 + 2] - res3) : r[idx1 + 2];
      r[idx1 + 3] = (part[node1] == t) ? (r[idx1 + 3] - res4) : r[idx1 + 3];
    }
  }

  uint32_t i;
  for(i = 0; i < snfc; i++) {
    const uint32_t if0 = snfic[i];
    const uint32_t if1 = snfic[i+1];
    uint32_t j;

    #pragma omp parallel for
    for(j = if0; j < if1; j++) {
      const uint32_t node0 = sn0[j];
      const uint32_t node1 = sn1[j];
      const uint32_t node2 = sn2[j];

      const double p1 = q[bsz * node0];
      const double p2 = q[bsz * node1];
      const double p3 = q[bsz * node2];

      const double ax = xyz0[node1] - xyz0[node0];
      const double ay = xyz1[node1] - xyz1[node0];
      const double az = xyz2[node1] - xyz2[node0];

      const double bx = xyz0[node2] - xyz0[node0];
      const double by = xyz1[node2] - xyz1[node0];
      const double bz = xyz2[node2] - xyz2[node0];

      /*
        Normal points away from grid interior.
        Magnitude is 1/3 area of surface triangle.
      */
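      /*
        Since 0.5*|a x b| is the triangle area, MAG0 and MAG1 are
        presumably +/-(1/6) scale factors; MAG0 appears to carry the
        opposite sign because the y component below is formed as
        ax*bz - az*bx, i.e. the negative of the usual cross-product
        term az*bx - ax*bz.
      */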
      double xn = ay * bz;
      xn -= az * by;
      xn *= MAG1;
      double yn = ax * bz;
      yn -= az * bx;
      yn *= MAG0;
      double zn = ax * by;
      zn -= ay * bx;
      zn *= MAG1;

      double pa = 0.125f * (p2 + p3);
      pa += 0.75f * p1;
      double pb = 0.125f * (p3 + p1);
      pb += 0.75f * p2;
      double pc = 0.125f * (p1 + p2);
      pc += 0.75f * p3;

      uint32_t idx;
      idx = bsz * node0;
      r[idx + 1] += xn * pa;
      r[idx + 2] += yn * pa;
      r[idx + 3] += zn * pa;

      idx = bsz * node1;
      r[idx + 1] += xn * pb;
      r[idx + 2] += yn * pb;
      r[idx + 3] += zn * pb;

      idx = bsz * node2;
      r[idx + 1] += xn * pc;
      r[idx + 2] += yn * pc;
      r[idx + 3] += zn * pc;
    }
  }

  /* Do the free boundaries */
  #pragma omp parallel for
  for(i = 0; i < nfnodes; i++) {
    uint32_t n = nfptr[i];

    /*
      Get normal and "other" two vectors. Remember that the normal
      components (f_xyz0, f_xyz1, f_xyz2) have the face magnitude
      folded into them.
    */
    double xn = f_xyz0[i];
    double yn = f_xyz1[i];
    double zn = f_xyz2[i];

    double area = xn * xn;
    area += yn * yn;
    area += zn * zn;
    area = sqrt(area);

    xn /= area;
    yn /= area;
    zn /= area;

    /*
      Now let's get our other two vectors.
      For the first vector, use {1,0,0} and subtract off the component
      in the direction of the face normal. If the inner product of
      {1,0,0} with the normal is close to unity, use {0,1,0} instead.
    */
    double X1, Y1, Z1;
    double dot = xn;
    if(fabs(dot) < 0.95f) {
      X1 = 1.f - dot * xn;
      Y1 = -dot * yn;
      Z1 = -dot * zn;
    } else {
      dot = yn;
      X1 = -dot * xn;
      Y1 = 1.f - dot * yn;
      Z1 = -dot * zn;
    }

    /* Normalize the first vector (V1) */
    double size = X1 * X1;
    size += Y1 * Y1;
    size += Z1 * Z1;
    size = sqrt(size);

    X1 /= size;
    Y1 /= size;
    Z1 /= size;

    /* Take cross-product of normal with V1 to get V2 */
    double X2 = yn * Z1;
    X2 -= zn * Y1;
    double Y2 = zn * X1;
    Y2 -= xn * Z1;
    double Z2 = xn * Y1;
    Z2 -= yn * X1;

    /* Calculate elements of T and T(inverse) evaluated at free-stream */
    double ubar0 = xn * velocity_u;
    ubar0 += yn * velocity_v;
    ubar0 += zn * velocity_w;

    double c20 = ubar0 * ubar0 + BETA;
    double c0 = sqrt(c20);

    double phi1 = xn * BETA;
    phi1 += velocity_u * ubar0;
    double phi2 = yn * BETA;
    phi2 += velocity_v * ubar0;
    double phi3 = zn * BETA;
    phi3 += velocity_w * ubar0;

    double phi4 = Y2 * phi3;
    phi4 -= Z2 * phi2;
    double phi5 = Z2 * phi1;
    phi5 -= X2 * phi3;
    double phi6 = X2 * phi2;
    phi6 -= Y2 * phi1;
    double phi7 = Z1 * phi2;
    phi7 -= Y1 * phi3;
    double phi8 = X1 * phi3;
    phi8 -= Z1 * phi1;
    double phi9 = Y1 * phi1;
    phi9 -= X1 * phi2;

    double t13 = c0 * BETA;
    double t23 = velocity_u * (ubar0 + c0);
    t23 += xn * BETA;
    double t33 = velocity_v * (ubar0 + c0);
    t33 += yn * BETA;
    double t43 = velocity_w * (ubar0 + c0);
    t43 += zn * BETA;

    double t14 = -c0 * BETA;
    double t24 = velocity_u * (ubar0 - c0);
    t24 += xn * BETA;
    double t34 = velocity_v * (ubar0 - c0);
    t34 += yn * BETA;
    double t44 = velocity_w * (ubar0 - c0);
    t44 += zn * BETA;

    double ti11 = velocity_u * phi4;
    ti11 += velocity_v * phi5;
    ti11 += velocity_w * phi6;
    ti11 = -ti11 / BETA;
    double ti21 = velocity_u * phi7;
    ti21 += velocity_v * phi8;
    ti21 += velocity_w * phi9;
    ti21 = -ti21 / BETA;
    double ti31 = 0.5f * (c0 - ubar0);
    ti31 /= BETA;
    double ti41 = -0.5f * (c0 + ubar0);
    ti41 /= BETA;

    /* Now, get the variables on the "inside" */
    double pi = q[bsz * n + 0];
    double ui = q[bsz * n + 1];
    double vi = q[bsz * n + 2];
    double wi = q[bsz * n + 3];

    double un = xn * ui;
    un += yn * vi;
    un += zn * wi;

    /* If ubar is negative, take the reference condition from outside */
    double pr, ur, vr, wr;
    if(un > 0.f) {
      pr = pi;
      ur = ui;
      vr = vi;
      wr = wi;
    } else {
      pr = pressure;
      ur = velocity_u;
      vr = velocity_v;
      wr = velocity_w;
    }
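    /*
      Characteristic selection: un > 0 means outflow through this face,
      so the characteristics travelling at ubar0 take their data
      (pr, ur, vr, wr) from the interior; otherwise they take the
      free-stream state. Note that rhs3 below always uses interior data
      and rhs4 always uses free-stream data, consistent with
      ubar0 + c0 > 0 (outgoing) and ubar0 - c0 < 0 (incoming) for any
      ubar0, since c0 = sqrt(ubar0^2 + BETA) > |ubar0| whenever BETA > 0.
    */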
    /* Set rhs */
    double rhs1 = ti11 * pr;
    rhs1 += phi4 * ur;
    rhs1 += phi5 * vr;
    rhs1 += phi6 * wr;
    rhs1 /= c20;
    double rhs2 = ti21 * pr;
    rhs2 += phi7 * ur;
    rhs2 += phi8 * vr;
    rhs2 += phi9 * wr;
    rhs2 /= c20;
    double rhs3 = 2.f * ti31 * pi;
    rhs3 += xn * ui;
    rhs3 += yn * vi;
    rhs3 += zn * wi;
    rhs3 = 0.5f * rhs3 / c20;
    double rhs4 = 2.f * ti41 * pressure;
    rhs4 += xn * velocity_u;
    rhs4 += yn * velocity_v;
    rhs4 += zn * velocity_w;
    rhs4 = 0.5f * rhs4 / c20;

    /* Now do matrix multiplication to get values on boundary */
    double pb = t13 * rhs3;
    pb += t14 * rhs4;
    double ub = X1 * rhs1;
    ub += X2 * rhs2;
    ub += t23 * rhs3;
    ub += t24 * rhs4;
    double vb = Y1 * rhs1;
    vb += Y2 * rhs2;
    vb += t33 * rhs3;
    vb += t34 * rhs4;
    double wb = Z1 * rhs1;
    wb += Z2 * rhs2;
    wb += t43 * rhs3;
    wb += t44 * rhs4;

    double ubar = xn * ub;
    ubar += yn * vb;
    ubar += zn * wb;

    uint32_t idx = bsz * n;
    r[idx + 0] += area * BETA * ubar;
    r[idx + 1] += area * (ub * ubar + xn * pb);
    r[idx + 2] += area * (vb * ubar + yn * pb);
    r[idx + 3] += area * (wb * ubar + zn * pb);
  }

  compute_time(&ktime, res->t);

#ifdef __USE_HW_COUNTER
  const uint64_t cycle = __rdtsc() - icycle;

  struct counters end;
  perf_read(fd, &end);

  struct tot tot;
  perf_calc(start, end, &tot);

  res->perf_counters->ctrs->flux.cycles += cycle;
  res->perf_counters->ctrs->flux.tot.imcR += tot.imcR;
  res->perf_counters->ctrs->flux.tot.imcW += tot.imcW;
  res->perf_counters->ctrs->flux.tot.edcR += tot.edcR;
  res->perf_counters->ctrs->flux.tot.edcW += tot.edcW;
#endif
}
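/*
  For reference, a minimal standalone sketch of the tangent-frame
  construction used twice above: given a unit face normal n, build an
  orthonormal pair (V1, V2) spanning the face plane. The function name
  and signature are illustrative only, not part of the original kernel.
*/
static inline void face_tangent_frame(const double xn, const double yn,
                                      const double zn,
                                      double *X1, double *Y1, double *Z1,
                                      double *X2, double *Y2, double *Z2)
{
  /* Project e_x out of n unless they are nearly parallel, then use e_y */
  double x1, y1, z1;
  if(fabs(xn) < 0.95f) {
    x1 = 1.0 - xn * xn;
    y1 = -xn * yn;
    z1 = -xn * zn;
  } else {
    x1 = -yn * xn;
    y1 = 1.0 - yn * yn;
    z1 = -yn * zn;
  }

  /* Normalize V1 */
  const double size = sqrt(x1 * x1 + y1 * y1 + z1 * z1);
  x1 /= size;
  y1 /= size;
  z1 /= size;

  /* V2 = n x V1 completes the right-handed frame (n, V1, V2) */
  *X1 = x1;
  *Y1 = y1;
  *Z1 = z1;
  *X2 = yn * z1 - zn * y1;
  *Y2 = zn * x1 - xn * z1;
  *Z2 = xn * y1 - yn * x1;
}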