_Atomic-4.c
/* PR c/65467 */
/* { dg-do compile } */
/* { dg-additional-options "-std=c11" } */
/* { dg-require-effective-target vect_simd_clones } */

#pragma omp declare simd
int
f1 (_Atomic int x, int y) /* { dg-warning "ignoring '#pragma omp declare simd' on function with '_Atomic' qualified non-'uniform' argument" } */
{
  return x + y;
}

#pragma omp declare simd uniform(x)
int
f2 (_Atomic int x, int y)
{
  return x + y;
}
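The testcase above only exercises the diagnostic. For context, here is a minimal caller sketch (not part of the testsuite; `caller`, `out`, `in`, and `n` are illustrative names) showing why `uniform(x)` makes f2 acceptable: with the clause, every SIMD lane shares the same `x`, so the compiler can emit and invoke a SIMD clone of f2 from a vectorized loop.

/* Illustrative sketch, assuming the f2 definition from the testcase above:
   uniform(x) promises x is identical across lanes of the SIMD clone. */
#pragma omp declare simd uniform(x)
int f2 (_Atomic int x, int y);

void
caller (int *restrict out, const int *restrict in, int n, _Atomic int x)
{
  int i;
  #pragma omp simd
  for (i = 0; i < n; i++)
    out[i] = f2 (x, in[i]); /* x is loop-invariant; in[i] varies per lane */
}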
omp_calloc_size_0.c
// RUN: %libomp-compile-and-run
#include <stdio.h>
#include <omp.h>

int main() {
  omp_alloctrait_t at[2];
  omp_allocator_handle_t a;
  void *p[2];
  at[0].key = omp_atk_pool_size;
  at[0].value = 2 * 1024 * 1024;
  at[1].key = omp_atk_fallback;
  at[1].value = omp_atv_default_mem_fb;
  a = omp_init_allocator(omp_large_cap_mem_space, 2, at);
  printf("allocator large created: %p\n", (void *)a);

  #pragma omp parallel num_threads(2)
  {
    int i = omp_get_thread_num();
    p[i] = omp_calloc(1024, 0, a);
    #pragma omp barrier
    printf("th %d, ptr %p\n", i, p[i]);
    omp_free(p[i], a);
  }

  // Both pointers should be NULL
  if (p[0] == NULL && p[1] == NULL) {
    printf("passed\n");
    return 0;
  } else {
    printf("failed: pointers %p %p\n", p[0], p[1]);
    return 1;
  }
}
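For contrast with the zero-size case the test exercises (where omp_calloc must return NULL), a minimal companion sketch of the same allocator API on a non-zero allocation, with the omp_destroy_allocator teardown that pairs with omp_init_allocator. Trait and size choices here are illustrative, not from the test.

// Companion sketch, not part of the test: non-zero omp_calloc returns
// zero-initialized memory; the allocator is torn down explicitly.
#include <stdio.h>
#include <omp.h>

int main() {
  omp_alloctrait_t at[1];
  at[0].key = omp_atk_fallback;
  at[0].value = omp_atv_default_mem_fb;
  omp_allocator_handle_t a = omp_init_allocator(omp_default_mem_space, 1, at);
  double *p = omp_calloc(16, sizeof(double), a);
  if (p != NULL)
    printf("p[0] = %f\n", p[0]); // prints 0.000000: memory is zeroed
  omp_free(p, a);
  omp_destroy_allocator(a);
  return 0;
}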
putty_fmt_plug.c
/* PuTTY private key cracker patch for JtR. Hacked together during Monsoon of * 2012 by Dhiru Kholia <dhiru.kholia at gmail.com> . * * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com> * * p-ppk-crack v0.5 made by michu@neophob.com -- PuTTY private key cracker * * Source code based on putty svn version, check * http://www.chiark.greenend.org.uk/~sgtatham/putty/licence.html */ #if FMT_EXTERNS_H extern struct fmt_main fmt_putty; #elif FMT_REGISTERS_H john_register_one(&fmt_putty); #else #include <string.h> #include "arch.h" #include "params.h" #include "common.h" #include "formats.h" #include "misc.h" #include "aes.h" #include "sha.h" #include <openssl/evp.h> #include "hmac_sha.h" #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 64 #endif #endif #include "memdbg.h" #define FORMAT_LABEL "PuTTY" #define FORMAT_NAME "Private Key" #define ALGORITHM_NAME "SHA1/AES 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1001 #define PLAINTEXT_LENGTH 32 #define BINARY_SIZE 0 #define BINARY_ALIGN 1 #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN 4 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #if defined (_OPENMP) static int omp_t = 1; #endif #define PUT_32BIT_MSB_FIRST(cp, value) ( \ (cp)[0] = (unsigned char)((value) >> 24), \ (cp)[1] = (unsigned char)((value) >> 16), \ (cp)[2] = (unsigned char)((value) >> 8), \ (cp)[3] = (unsigned char)(value) ) #define PUT_32BIT(cp, value) PUT_32BIT_MSB_FIRST(cp, value) static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static int *cracked; static int any_cracked; static size_t cracked_size; static struct custom_salt { int is_mac, old_fmt; char alg[8]; int cipher, cipherblk; int public_blob_len, private_blob_len; char encryption[32]; char mac[20]; char comment[512]; unsigned char public_blob[4096]; unsigned char private_blob[4096]; } *cur_salt; static struct fmt_tests putty_tests[] = { {"$putty$1*16*1*0*10c434c33cf160352b7a5b3a1ecd8434f1066cac*432*000000077373682d647373000000806bb7ed4d03163f5be550dba68e0f1af7dae4b49f736ab452552a1163210c1366fd1f65a31bb526b1d3028a31d30b3315c19dc02417db99336f00b1f9565431d02fc59cd756ab6fe506b959df3799e4a70fcbe54ad9ef34d338014add8ac1f57f2a6dce8403c93709cb23d3c379f5de4f9fc45a73b3f9a43e6c1cc220bd38274b0000001500b4bf70cda203027a13135d43e459872eed384a3d0000008049a7d8e8d1db1630f9a9f6b1bf275d01e4287a4c2f038707d8c07ab664dbd264f6b4676de93c1f003bb57146a82314ab6c426628498209fa33c68a881abfd90dc1e978d430c9ace78d6c9895938494e91e3ca50132c9bde8fae4381e6fe59d03a9feee39b10cb2fea4e4d5f5ef10e523d34925f105eff665db2ac35e6cf0a1ac000000800def6e4f7ed4af0f1f8ed9524595d3fecd0a191ea9a6402d4235ee59ff2000011e36b5936280a3b5dc0b8d8ea7747e04ad92e46be8cb374d931c1e78bbdafea4ac16aba2e4b3cbd0779d28a609e848fb54332a169f24fac5e4c736c3dae4f95afe0aacaffb2d4829956fbd17d514614a45f8eefdd0d7d4982d101d72002f05fd*32*b38180c482949f3b4f44a20fd599c2cb411c671b4b120663bef9a61b360e442a*ssh-dss*aes256-cbc*dsa-key-20120721", "password"}, 
{"$putty$1*16*1*0*0dbfd7b4ec870df2fb8becc9efa6feeec683cd98*149*000000077373682d727361000000012500000081008ffc01db52ff6543a67b747e9882d04c32dc769b0b1fa575e1e838133d0bc381291af654b112a6ead07b157e5556d2052c7d516b605415687769f1095e2107067e08cc569e6382b31a42d93bbb4c189c01469872b65e50af3f81ed651cb4144c556cadefda8706f00c65699a074fc4fa5843a8370852d04b8f5575f0f2186611*352*9df7f3992f46922e9e03ee381a9ba06082fcf07f572f5a742400fdbdb8fd850161b0dd877ce1fb5433311c097463a8b0c0d7e98f58d361ca1579a01d30878c8b934653ee1278942ee1fbba092e495d2c8b2f5903b7cb3fd1b5c0445d993e3139fa3741dd51e968fb8cc9cc5c257d25cb94d404e448ec334fc1be713c3156a8c9110280623687a7f3c5a8dede7efa98d4bfd12ae8cef634c0c51dcdccf2a9f65e14bd3f5cb34270ad1ea02732d653073fc2e772e3dfea14fa29a50052831bafedd10bd73a13c52db956e2b674115d9620cc1136432edc4e2968681d177278999cda7cc6aeb9e2427a11f2aee67990c02a400144fab0cf4546d19726247a076423384bd98c3d6fb810ab5ee7ff248b8a87a6652dff7deb38349b9929ba29375dcdd90c7e01ad6900b48cf48300dd157cc80ae94a1d6e7545ec7fcaf96e0172acf08ee7e21e494ca601f5890ad9e8ca5ff89141aa50ae188842da52ae000d38d1fa*ssh-rsa*aes256-cbc*rsa-key-20120721", "openwall"}, {NULL} }; static void init(struct fmt_main *self) { #if defined (_OPENMP) omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); any_cracked = 0; cracked_size = sizeof(*cracked) * self->params.max_keys_per_crypt; cracked = mem_calloc(self->params.max_keys_per_crypt, sizeof(*cracked)); } static void done(void) { MEM_FREE(cracked); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy; char *keeptr; char *p; int res; int is_old_fmt; if (strncmp(ciphertext, "$putty$", 7)) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += 7; if ((p = strtokm(ctcopy, "*")) == NULL) /* cipher */ goto err; if (!isdec(p)) goto err; res = atoi(p); if(res != 1) /* check cipher type */ goto err; if ((p = strtokm(NULL, "*")) == NULL) /* cipher block length*/ goto err; if (!isdec(p)) goto err; res = atoi(p); if(res != 16) /* check cipher block length */ goto err; if ((p = strtokm(NULL, "*")) == NULL) /* is_mac */ goto err; if (!isdec(p)) goto err; res = atoi(p); if(res != 0 && res != 1) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* old_fmt */ goto err; if (!isdec(p)) goto err; is_old_fmt = atoi(p); if(is_old_fmt != 0 && is_old_fmt!= 1) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* mac */ goto err; res = strlen(p); if (res > 128) goto err; if (hexlenl(p) != res) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* public_blob_len */ goto err; if (!isdec(p)) goto err; res = atoi(p); if (res > 4096) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* public_blob */ goto err; if (hexlenl(p) != res * 2) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* private_blob_len */ goto err; if (!isdec(p)) goto err; res = atoi(p); if (res > 4096) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* private_blob */ goto err; if (hexlenl(p) != res * 2) goto err; if (!is_old_fmt) { if ((p = strtokm(NULL, "*")) == NULL) /* alg */ goto err; if (strlen(p) > 7) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* encryption */ goto err; if (strlen(p) > 32) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* comment */ goto err; if (strlen(p) > 512) goto err; } MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; 
int i; char *p; /* ensure alignment */ static union { struct custom_salt _cs; ARCH_WORD_32 dummy; } un; struct custom_salt *cs = &(un._cs); memset(cs, 0, sizeof(un)); ctcopy += 7; /* skip over "$putty$" marker */ p = strtokm(ctcopy, "*"); cs->cipher = atoi(p); p = strtokm(NULL, "*"); cs->cipherblk = atoi(p); p = strtokm(NULL, "*"); cs->is_mac = atoi(p); p = strtokm(NULL, "*"); cs->old_fmt = atoi(p); p = strtokm(NULL, "*"); for (i = 0; i < 20; i++) cs->mac[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtokm(NULL, "*"); cs->public_blob_len = atoi(p); p = strtokm(NULL, "*"); for (i = 0; i < cs->public_blob_len; i++) cs->public_blob[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtokm(NULL, "*"); cs->private_blob_len = atoi(p); p = strtokm(NULL, "*"); for (i = 0; i < cs->private_blob_len; i++) cs->private_blob[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; if(!cs->old_fmt) { p = strtokm(NULL, "*"); strcpy(cs->alg, p); p = strtokm(NULL, "*"); strcpy(cs->encryption, p); p = strtokm(NULL, "*"); strcpy(cs->comment, p); } MEM_FREE(keeptr); return (void *)cs; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static void putty_set_key(char *key, int index) { int saved_len = strlen(key); if (saved_len > PLAINTEXT_LENGTH) saved_len = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] = 0; } static char *get_key(int index) { return saved_key[index]; } static void SHA_Simple(void *p, int len, unsigned char *output) { SHA_CTX ctx; SHA1_Init(&ctx); SHA1_Update(&ctx, p, len); SHA1_Final(output, &ctx); } static int LAME_ssh2_load_userkey(char *passphrase) { int passlen = strlen(passphrase); unsigned char out[sizeof(cur_salt->private_blob)]; AES_KEY akey; unsigned char iv[32]; /* Decrypt the private blob. */ if (cur_salt->cipher) { unsigned char key[40]; SHA_CTX s; if (cur_salt->private_blob_len % cur_salt->cipherblk) goto error; SHA1_Init(&s); SHA1_Update(&s, (void*)"\0\0\0\0", 4); SHA1_Update(&s, passphrase, passlen); SHA1_Final(key + 0, &s); SHA1_Init(&s); SHA1_Update(&s, (void*)"\0\0\0\1", 4); SHA1_Update(&s, passphrase, passlen); SHA1_Final(key + 20, &s); memset(iv, 0, 32); memset(&akey, 0, sizeof(AES_KEY)); if(AES_set_decrypt_key(key, 256, &akey) < 0) { fprintf(stderr, "AES_set_decrypt_key failed!\n"); } AES_cbc_encrypt(cur_salt->private_blob, out , cur_salt->private_blob_len, &akey, iv, AES_DECRYPT); } /* Verify the MAC. */ { unsigned char binary[20]; unsigned char *macdata; unsigned char macdata_ar[4*5+sizeof(cur_salt->alg)+sizeof(cur_salt->encryption)+sizeof(cur_salt->comment)+sizeof(cur_salt->public_blob)+sizeof(cur_salt->private_blob)+1]; int maclen; if (cur_salt->old_fmt) { /* MAC (or hash) only covers the private blob. 
*/ macdata = out; maclen = cur_salt->private_blob_len; } else { unsigned char *p; int namelen = strlen(cur_salt->alg); int enclen = strlen(cur_salt->encryption); int commlen = strlen(cur_salt->comment); maclen = (4 + namelen + 4 + enclen + 4 + commlen + 4 + cur_salt->public_blob_len + 4 + cur_salt->private_blob_len); p = macdata_ar; #define DO_STR(s,len) PUT_32BIT(p,(len));memcpy(p+4,(s),(len));p+=4+(len) DO_STR(cur_salt->alg, namelen); DO_STR(cur_salt->encryption, enclen); DO_STR(cur_salt->comment, commlen); DO_STR(cur_salt->public_blob, cur_salt->public_blob_len); DO_STR(out, cur_salt->private_blob_len); macdata = macdata_ar; } if (cur_salt->is_mac) { SHA_CTX s; unsigned char mackey[20]; unsigned int length = 20; // HMAC_CTX ctx; char header[] = "putty-private-key-file-mac-key"; SHA1_Init(&s); SHA1_Update(&s, header, sizeof(header)-1); if (cur_salt->cipher && passphrase) SHA1_Update(&s, passphrase, passlen); SHA1_Final(mackey, &s); hmac_sha1(mackey, 20, macdata, maclen, binary, length); /* HMAC_Init(&ctx, mackey, 20, EVP_sha1()); * HMAC_Update(&ctx, macdata, maclen); * HMAC_Final(&ctx, binary, &length); * HMAC_CTX_cleanup(&ctx); */ } else { SHA_Simple(macdata, maclen, binary); } if (memcmp(cur_salt->mac, binary, 20) == 0) return 1; } error: return 0; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; if (any_cracked) { memset(cracked, 0, cracked_size); any_cracked = 0; } #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index++) #endif { cracked[index] = LAME_ssh2_load_userkey(saved_key[index]); if (cracked[index]) #ifdef _OPENMP #pragma omp atomic #endif any_cracked |= 1; } return count; } static int cmp_all(void *binary, int count) { return any_cracked; } static int cmp_one(void *binary, int index) { return cracked[index]; } static int cmp_exact(char *source, int index) { return cracked[index]; } struct fmt_main fmt_putty = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { NULL }, putty_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, fmt_default_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, NULL, set_salt, putty_set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
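The core of LAME_ssh2_load_userkey() above is PuTTY's passphrase-to-key derivation: the AES-256 key is SHA1("\0\0\0\0" || passphrase) || SHA1("\0\0\0\1" || passphrase), of which the first 32 bytes are used with a zero IV. A standalone sketch of just that step (illustrative; uses the same OpenSSL SHA-1 calls as the format, compile with -lcrypto):

/* Sketch of the key-derivation step from LAME_ssh2_load_userkey() above. */
#include <stdio.h>
#include <string.h>
#include <openssl/sha.h>

static void putty_derive_key(const char *passphrase, unsigned char key[40])
{
	SHA_CTX s;
	int passlen = strlen(passphrase);
	SHA1_Init(&s);
	SHA1_Update(&s, "\0\0\0\0", 4);
	SHA1_Update(&s, passphrase, passlen);
	SHA1_Final(key, &s);
	SHA1_Init(&s);
	SHA1_Update(&s, "\0\0\0\1", 4);
	SHA1_Update(&s, passphrase, passlen);
	SHA1_Final(key + 20, &s);
}

int main(void)
{
	unsigned char key[40];
	int i;
	putty_derive_key("password", key);
	for (i = 0; i < 32; i++) /* first 32 bytes = AES-256 key */
		printf("%02x", key[i]);
	printf("\n");
	return 0;
}

A candidate passphrase is then accepted or rejected by the is_mac branch above: HMAC-SHA-1, keyed with SHA1("putty-private-key-file-mac-key" || passphrase), over the algorithm, encryption, comment, and blob fields.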
additionally.c
#include <unistd.h> #include "additionally.h" #define STB_IMAGE_IMPLEMENTATION #include "stb_image.h" #define STB_IMAGE_WRITE_IMPLEMENTATION #include "stb_image_write.h" // global GPU index: cuda.c int gpu_index = 0; // im2col.c float im2col_get_pixel(float *im, int height, int width, int channels, int row, int col, int channel, int pad) { row -= pad; col -= pad; if (row < 0 || col < 0 || row >= height || col >= width) return 0; return im[col + width*(row + height*channel)]; } // im2col.c //From Berkeley Vision's Caffe! //https://github.com/BVLC/caffe/blob/master/LICENSE void im2col_cpu(float* data_im, int channels, int height, int width, int ksize, int stride, int pad, float* data_col) { int c, h, w; int height_col = (height + 2 * pad - ksize) / stride + 1; int width_col = (width + 2 * pad - ksize) / stride + 1; int channels_col = channels * ksize * ksize; for (c = 0; c < channels_col; ++c) { int w_offset = c % ksize; int h_offset = (c / ksize) % ksize; int c_im = c / ksize / ksize; for (h = 0; h < height_col; ++h) { for (w = 0; w < width_col; ++w) { int im_row = h_offset + h * stride; int im_col = w_offset + w * stride; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } } } // fuse convolutional and batch_norm weights into one convolutional-layer void yolov2_fuse_conv_batchnorm(network net) { int j; for (j = 0; j < net.n; ++j) { layer *l = &net.layers[j]; if (l->type == CONVOLUTIONAL) { //printf(" Fuse Convolutional layer \t\t l->size = %d \n", l->size); if (l->batch_normalize) { int f; for (f = 0; f < l->n; ++f) { l->biases[f] = l->biases[f] - l->scales[f] * l->rolling_mean[f] / (sqrtf(l->rolling_variance[f]) + .000001f); const size_t filter_size = l->size*l->size*l->c; int i; for (i = 0; i < filter_size; ++i) { int w_index = f*filter_size + i; l->weights[w_index] = l->weights[w_index] * l->scales[f] / (sqrtf(l->rolling_variance[f]) + .000001f); } } l->batch_normalize = 0; } } } } static inline unsigned char xnor(unsigned char a, unsigned char b) { //return a == b; return !(a^b); } // INT-32 static inline uint32_t get_bit_int32(uint32_t const*const src, size_t index) { size_t src_i = index / 32; int src_shift = index % 32; unsigned char val = (src[src_i] & (1 << src_shift)) > 0; return val; } static inline uint32_t xnor_int32(uint32_t a, uint32_t b) { return ~(a^b); } static inline uint64_t xnor_int64(uint64_t a, uint64_t b) { return ~(a^b); } static inline uint32_t fill_bit_int32(char src) { if (src == 0) return 0x00000000; else return 0xFFFFFFFF; } static inline uint64_t fill_bit_int64(char src) { if (src == 0) return 0x0000000000000000; else return 0xFFFFFFFFFFFFFFFF; } void binary_int32_printf(uint32_t src) { int i; for (i = 0; i < 32; ++i) { if (src & 1) printf("1"); else printf("0"); src = src >> 1; } printf("\n"); } void binary_int64_printf(uint64_t src) { int i; for (i = 0; i < 64; ++i) { if (src & 1) printf("1"); else printf("0"); src = src >> 1; } printf("\n"); } void get_mean_array(float *src, size_t size, size_t filters, float *mean_arr) { size_t i, counter; counter = 0; for (i = 0; i < size; i += size / filters) { mean_arr[counter++] = fabs(src[i]); } } void forward_blank_layer(layer l, network_state state) {} static inline void set_bit(unsigned char *const dst, size_t index) { size_t dst_i = index / 8; int dst_shift = index % 8; dst[dst_i] |= 1 << dst_shift; } static inline unsigned char get_bit(unsigned char const*const src, size_t index) { size_t src_i = index / 8; 
int src_shift = index % 8; unsigned char val = (src[src_i] & (1 << src_shift)) > 0; return val; } uint8_t reverse_8_bit(uint8_t a) { return ((a * 0x0802LU & 0x22110LU) | (a * 0x8020LU & 0x88440LU)) * 0x10101LU >> 16; } uint32_t reverse_32_bit(uint32_t a) { // unsigned int __rbit(unsigned int val) // for ARM //__asm__("rbit %0, %1\n" : "=r"(output) : "r"(input)); return (reverse_8_bit(a >> 24) << 0) | (reverse_8_bit(a >> 16) << 8) | (reverse_8_bit(a >> 8) << 16) | (reverse_8_bit(a >> 0) << 24); } #define swap(a0, a1, j, m) t = (a0 ^ (a1 >>j)) & m; a0 = a0 ^ t; a1 = a1 ^ (t << j); void transpose32_optimized(uint32_t A[32]) { int j, k; unsigned m, t; //m = 0x0000FFFF; //for (j = 16; j != 0; j = j >> 1, m = m ^ (m << j)) { // for (k = 0; k < 32; k = (k + j + 1) & ~j) { // t = (A[k] ^ (A[k + j] >> j)) & m; // A[k] = A[k] ^ t; // A[k + j] = A[k + j] ^ (t << j); // } //} j = 16; m = 0x0000FFFF; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } j = 8; m = 0x00ff00ff; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } j = 4; m = 0x0f0f0f0f; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } j = 2; m = 0x33333333; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } j = 1; m = 0x55555555; for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); } // reverse Y for (j = 0; j < 16; ++j) { uint32_t tmp = A[j]; A[j] = reverse_32_bit(A[31 - j]); A[31 - j] = reverse_32_bit(tmp); } } void transpose_32x32_bits_reversed_diagonale(uint32_t *A, uint32_t *B, int m, int n) { unsigned A_tmp[32]; int i; #pragma unroll for (i = 0; i < 32; ++i) A_tmp[i] = A[i * m]; transpose32_optimized(A_tmp); #pragma unroll for (i = 0; i < 32; ++i) B[i*n] = A_tmp[i]; } // transpose by 32-bit void transpose_bin(uint32_t *A, uint32_t *B, const int n, const int m, const int lda, const int ldb, const int block_size) { int i; #pragma omp parallel for for (i = 0; i < n; i += 32) { int j; for (j = 0; j < m; j += 32) { int a_index = i*lda + j; int b_index = j*ldb + i; transpose_32x32_bits_reversed_diagonale(&A[a_index / 32], &B[b_index / 32], lda / 32, ldb / 32); //transpose_32x32_bits_my(&A[a_index/32], &B[b_index/32], lda/32, ldb/32); } for (; j < m; ++j) { if (get_bit(A, i*lda + j)) set_bit(B, j*ldb + i); } } } // popcnt 32 bit static inline int popcnt_32(uint32_t val32) { #ifdef WIN32 // Windows MSVS int tmp_count = __popcnt(val32); #else // Linux GCC int tmp_count = __builtin_popcount(val32); #endif return tmp_count; } void gemm_nn_bin_transposed_32bit_packed(int M, int N, int K, float ALPHA, uint32_t *A, int lda, uint32_t *B, int ldb, float *C, int ldc, float *mean_arr) { int i; #pragma omp parallel for for (i = 0; i < M; ++i) { // l.n int j, s; float mean_val = mean_arr[i]; for (j = 0; j < N; ++j) // out_h*out_w; { float val = 0; for (s = 0; s < K; ++s) // l.size*l.size*l.c/32 or (l.size*l.size*l.c) { register uint32_t A_PART = ((uint32_t*)A)[i*lda + s]; register uint32_t B_PART = ((uint32_t*)B)[j*ldb + s]; uint32_t xnor_result = ~(A_PART ^ B_PART); int32_t count = popcnt_32(xnor_result); // must be Signed int val += (2 * count - 32) * mean_val; } C[i*ldc + j] += val; } } } // 32 channels -> 1 channel (with 32 floats) // 256 channels -> 8 channels (with 32 floats) void repack_input(float *input, float *re_packed_input, int w, int h, int c) { const int items_per_channel = w * h; int chan, i; for (chan = 0; chan < c; chan += 32) { for (i = 0; i < items_per_channel; ++i) { int c_pack; for (c_pack = 0; c_pack < 32; ++c_pack) { float 
src = input[(chan + c_pack)*items_per_channel + i]; re_packed_input[chan*items_per_channel + i * 32 + c_pack] = src; } } } } // transpose uint32_t matrix void transpose_uint32(uint32_t *src, uint32_t *dst, int src_h, int src_w, int src_align, int dst_align) { //l.bit_align - algined (n) by 32 //new_ldb - aligned (k) by 256 int i; //#pragma omp parallel for for (i = 0; i < src_h; i += 1) // l.size*l.size*l.c; { int j; for (j = 0; j < src_w; j += 1) // out_h*out_w; { ((uint32_t *)dst)[j*dst_align / 32 + i] = ((uint32_t *)src)[i*src_align + j]; } } } // convolution repacked bit matrix (32 channels -> 1 uint32_t) XNOR-net void convolution_repacked(uint32_t *packed_input, uint32_t *packed_weights, float *output, int w, int h, int c, int n, int size, int pad, int new_lda, float *mean_arr) { int fil; // filter index #pragma omp parallel for for (fil = 0; fil < n; ++fil) { float mean_val = mean_arr[fil]; int chan, c_pack, y, x, f_y, f_x; // channel index for (chan = 0; chan < c / 32; ++chan) //for (chan = 0; chan < l.c; chan += 32) //for (c_pack = 0; c_pack < 32; ++c_pack) // input - y for (y = 0; y < h; ++y) // input - x for (x = 0; x < w; ++x) { int const output_index = fil*w*h + y*w + x; float sum = 0; // filter - y for (f_y = 0; f_y < size; ++f_y) { int input_y = y + f_y - pad; // filter - x for (f_x = 0; f_x < size; ++f_x) { int input_x = x + f_x - pad; if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue; // normal //float input = state.input[(chan + c_pack)*l.w*l.h + input_y*l.w + input_x]; //float weight = l.weights[fil*l.c*l.size*l.size + (chan + c_pack)*l.size*l.size + f_y*l.size + f_x]; // packed //float input = re_packed_input[chan*l.w*l.h + (input_y*l.w + input_x) * 32 + c_pack]; //float weight = l.weights[fil*l.c*l.size*l.size + chan*l.size*l.size + (f_y*l.size + f_x) * 32 + c_pack]; //sum += input * weight; //float input = re_packed_input[chan*l.w*l.h + (input_y*l.w + input_x) * 32 + c_pack]; //float weight = l.weights[fil*l.c*l.size*l.size + chan*l.size*l.size + (f_y*l.size + f_x) * 32 + c_pack]; //uint32_t bit1 = input > 0; //uint32_t bit2 = weight > 0; //uint32_t count = (~(bit1 ^ bit2)) & 1; //float result = (2 * (float)count - 1) * mean_val; //printf("\n mul = %f, bit1 = %d, bit2 = %d, count = %d, mean = %f, result = %f ", input*weight, bit1, bit2, count, mean_val, result); //sum += result; uint32_t input = ((uint32_t *)packed_input)[chan*w*h + input_y*w + input_x]; //uint32_t weight = ((uint32_t *)l.align_bit_weights)[fil*l.c*l.size*l.size/32 + chan*l.size*l.size + f_y*l.size + f_x]; uint32_t weight = ((uint32_t *)packed_weights)[fil*new_lda / 32 + chan*size*size + f_y*size + f_x]; uint32_t xnor_result = ~(input ^ weight); int32_t count = popcnt_32(xnor_result); // mandatory Signed int sum += (2 * count - 32) * mean_val; } } // l.output[filters][width][height] += // state.input[channels][width][height] * // l.weights[filters][channels][filter_width][filter_height]; output[output_index] += sum; } } } // -------------- blas.c -------------- #ifdef AVX #ifdef _WIN64 // Windows #include <intrin.h> #else // Linux #include <x86intrin.h> #endif #include <ammintrin.h> #include <immintrin.h> #include <smmintrin.h> #include <emmintrin.h> // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=broad&expand=561 // https://software.intel.com/sites/landingpage/IntrinsicsGuide void gemm_nn(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i, j, k; for (i = 0; i < M; ++i) { for (k = 0; k < K; ++k) { float A_PART 
= ALPHA*A[i*lda + k]; __m256 a256, b256, c256, result256; // AVX a256 = _mm256_set1_ps(A_PART); for (j = 0; j < N - 8; j += 8) { b256 = _mm256_loadu_ps(&B[k*ldb + j]); c256 = _mm256_loadu_ps(&C[i*ldc + j]); // FMA - Intel Haswell (2013), AMD Piledriver (2012) //result256 = _mm256_fmadd_ps(a256, b256, c256); result256 = _mm256_mul_ps(a256, b256); result256 = _mm256_add_ps(result256, c256); _mm256_storeu_ps(&C[i*ldc + j], result256); } int prev_end = (N % 8 == 0) ? (N - 8) : (N / 8) * 8; for (j = prev_end; j < N; ++j) C[i*ldc + j] += A_PART*B[k*ldb + j]; } } } void gemm_nn_bin_32bit_packed(int M, int N, int K, float ALPHA, uint32_t *A, int lda, uint32_t *B, int ldb, float *C, int ldc, float *mean_arr) { int i; #pragma omp parallel for for (i = 0; i < M; ++i) { // l.n int j, s; float mean_val = mean_arr[i]; for (s = 0; s < K; ++s) // l.size*l.size*l.c/32 or (l.size*l.size*l.c) { register uint32_t A_PART = A[i*lda + s]; __m256i a256 = _mm256_set1_epi32(A_PART); for (j = 0; j < N - 8; j += 8) { __m256i b256 = *((__m256i*)&B[s*ldb + j]); __m256i xor256 = _mm256_xor_si256(a256, b256); // xnor = xor(a,b) __m256i all_1 = _mm256_set1_epi8(255); __m256i xnor256 = _mm256_andnot_si256(xor256, all_1); // xnor = not(xor(a,b)) // waiting for - CPUID Flags: AVX512VPOPCNTDQ: __m512i _mm512_popcnt_epi32(__m512i a) __m256 count = _mm256_setr_ps( popcnt_32(_mm256_extract_epi32(xnor256, 0)), popcnt_32(_mm256_extract_epi32(xnor256, 1)), popcnt_32(_mm256_extract_epi32(xnor256, 2)), popcnt_32(_mm256_extract_epi32(xnor256, 3)), popcnt_32(_mm256_extract_epi32(xnor256, 4)), popcnt_32(_mm256_extract_epi32(xnor256, 5)), popcnt_32(_mm256_extract_epi32(xnor256, 6)), popcnt_32(_mm256_extract_epi32(xnor256, 7))); __m256 val2 = _mm256_set1_ps(2); count = _mm256_mul_ps(count, val2); // count * 2 __m256 val32 = _mm256_set1_ps(32); count = _mm256_sub_ps(count, val32); // count - 32 __m256 mean256 = _mm256_set1_ps(mean_val); count = _mm256_mul_ps(count, mean256); // count * mean_val __m256 c256 = *((__m256*)&C[i*ldc + j]); count = _mm256_add_ps(count, c256); // c = c + count *((__m256*)&C[i*ldc + j]) = count; } for (; j < N; ++j) // out_h*out_w; { register uint32_t B_PART = B[s*ldb + j]; uint32_t xnor_result = ~(A_PART ^ B_PART); int32_t count = popcnt_32(xnor_result); // must be Signed int C[i*ldc + j] += (2 * count - 32) * mean_val; } } } } #if defined(_MSC_VER) && _MSC_VER <= 1900 static inline __int32 _mm256_extract_epi64(__m256i a, const int index) { return a.m256i_i64[index]; } static inline __int32 _mm256_extract_epi32(__m256i a, const int index) { return a.m256i_i32[index]; } #endif static inline float _castu32_f32(uint32_t a) { return *((float *)&a); } #if defined(_MSC_VER) // Windows static inline float _mm256_extract_float32(__m256 a, const int index) { return a.m256_f32[index]; } #else // Linux static inline float _mm256_extract_float32(__m256 a, const int index) { return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), index)); } #endif //From Berkeley Vision's Caffe! 
//https://github.com/BVLC/caffe/blob/master/LICENSE void im2col_cpu_custom(float* data_im, int channels, int height, int width, int ksize, int stride, int pad, float* data_col) { int c; const int height_col = (height + 2 * pad - ksize) / stride + 1; const int width_col = (width + 2 * pad - ksize) / stride + 1; const int channels_col = channels * ksize * ksize; // optimized version if (height_col == height && width_col == width && stride == 1 && pad == 1)// && is_fma_avx()) { #pragma omp parallel for for (c = 0; c < channels_col; ++c) { int h, w; int w_offset = c % ksize; int h_offset = (c / ksize) % ksize; int c_im = c / ksize / ksize; for (h = pad; h < height_col - pad; ++h) { for (w = pad; w < width_col - pad - 8; w += 8) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; int col_index = (c * height_col + h) * width_col + w; //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)]; __m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)])); _mm256_storeu_ps(&data_col[col_index], src256); } for (; w < width_col - pad; ++w) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)]; } } { w = 0; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } { w = width_col - 1; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } { h = 0; for (w = 0; w < width_col; ++w) { int im_row = h_offset + h; int im_col = w_offset + w; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } { h = height_col - 1; for (w = 0; w < width_col; ++w) { int im_row = h_offset + h; int im_col = w_offset + w; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); } } } } else { //printf("\n Error: is no non-optimized version \n"); im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); } } //From Berkeley Vision's Caffe! 
//https://github.com/BVLC/caffe/blob/master/LICENSE void im2col_cpu_custom_bin(float* data_im, int channels, int height, int width, int ksize, int stride, int pad, float* data_col, int bit_align) { int c; const int height_col = (height + 2 * pad - ksize) / stride + 1; const int width_col = (width + 2 * pad - ksize) / stride + 1; const int channels_col = channels * ksize * ksize; // optimized version if (height_col == height && width_col == width && stride == 1 && pad == 1) { //__m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000); __m256 float_zero256 = _mm256_set1_ps(0.00); int new_ldb = bit_align; #pragma omp parallel for for (c = 0; c < channels_col; ++c) { int h, w; int w_offset = c % ksize; int h_offset = (c / ksize) % ksize; int c_im = c / ksize / ksize; for (h = pad; h < height_col - pad; ++h) { for (w = pad; w < width_col - pad - 8; w += 8) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //__m256i src256 = _mm256_loadu_si256((__m256i *)(&data_im[im_col + width*(im_row + height*c_im)])); //__m256i result256 = _mm256_and_si256(src256, all256_sing1); // check sign in 8 x 32-bit floats //uint16_t mask = _mm256_movemask_ps(_mm256_castsi256_ps(result256)); // (val >= 0) ? 0 : 1 //mask = ~mask; // inverse mask, (val >= 0) ? 1 : 0 __m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)])); __m256 result256 = _mm256_cmp_ps(src256, float_zero256, _CMP_GT_OS); uint16_t mask = _mm256_movemask_ps(result256); // (val > 0) ? 0 : 1 uint16_t *dst_ptr = &((unsigned char*)data_col)[col_index / 8]; *dst_ptr |= (mask << (col_index % 8)); } for (; w < width_col - pad; ++w) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)]; float val = data_im[im_col + width*(im_row + height*c_im)]; if (val > 0) set_bit(data_col, col_index); } } { w = 0; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); if (val > 0) set_bit(data_col, col_index); } } { w = width_col - 1; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); if (val > 0) set_bit(data_col, col_index); } } { h = 0; for (w = 0; w < width_col; ++w) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); if (val > 0) set_bit(data_col, col_index); } } { h = height_col - 1; for (w = 0; 
w < width_col; ++w) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); if (val > 0) set_bit(data_col, col_index); } } } } else { printf("\n Error: is no non-optimized version \n"); //im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); // must be aligned for transpose after float_to_bin // float_to_bit(b, t_input, src_size); // transpose_bin(t_input, *t_bit_input, k, n, bit_align, new_ldb, 8); } } void activate_array_cpu_custom(float *x, const int n, const ACTIVATION a) { int i = 0; if (a == LINEAR) { } else if (a == LEAKY) { { __m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000); __m256 all256_01 = _mm256_set1_ps(0.1F); for (i = 0; i < n - 8; i += 8) { //x[i] = (x[i]>0) ? x[i] : .1*x[i]; __m256 src256 = _mm256_loadu_ps(&x[i]); __m256 mult256 = _mm256_mul_ps((src256), all256_01); // mult * 0.1 __m256i sign256 = _mm256_and_si256(_mm256_castps_si256(src256), all256_sing1); // check sign in 8 x 32-bit floats __m256 result256 = _mm256_blendv_ps(src256, mult256, _mm256_castsi256_ps(sign256)); // (sign>0) ? src : mult; _mm256_storeu_ps(&x[i], result256); } } for (; i < n; ++i) { x[i] = (x[i]>0) ? x[i] : .1*x[i]; } } else { for (i = 0; i < n; ++i) { x[i] = activate(x[i], a); } } } void forward_maxpool_layer_avx(float *src, float *dst, int *indexes, int size, int w, int h, int out_w, int out_h, int c, int pad, int stride, int batch) { const int w_offset = -pad / 2; const int h_offset = -pad / 2; int b, k; for (b = 0; b < batch; ++b) { #pragma omp parallel for for (k = 0; k < c; ++k) { int i, j, m, n; for (i = 0; i < out_h; ++i) { //for (j = 0; j < out_w; ++j) { j = 0; if (stride == 1) { for (j = 0; j < out_w - 8 - (size - 1); j += 8) { int out_index = j + out_w*(i + out_h*(k + c*b)); __m256 max256 = _mm256_set1_ps(-FLT_MAX); for (n = 0; n < size; ++n) { for (m = 0; m < size; ++m) { int cur_h = h_offset + i*stride + n; int cur_w = w_offset + j*stride + m; int index = cur_w + w*(cur_h + h*(k + b*c)); int valid = (cur_h >= 0 && cur_h < h && cur_w >= 0 && cur_w < w); if (!valid) continue; __m256 src256 = _mm256_loadu_ps(&src[index]); max256 = _mm256_max_ps(src256, max256); } } _mm256_storeu_ps(&dst[out_index], max256); } } else if (size == 2 && stride == 2) { for (j = 0; j < out_w - 4; j += 4) { int out_index = j + out_w*(i + out_h*(k + c*b)); float max = -FLT_MAX; int max_i = -1; __m128 max128 = _mm_set1_ps(-FLT_MAX); for (n = 0; n < size; ++n) { //for (m = 0; m < size; ++m) m = 0; { int cur_h = h_offset + i*stride + n; int cur_w = w_offset + j*stride + m; int index = cur_w + w*(cur_h + h*(k + b*c)); int valid = (cur_h >= 0 && cur_h < h && cur_w >= 0 && cur_w < w); if (!valid) continue; __m256 src256 = _mm256_loadu_ps(&src[index]); __m256 src256_2 = _mm256_permute_ps(src256, (1 << 0) | (3 << 4)); __m256 max256 = _mm256_max_ps(src256, src256_2); __m128 src128_0 = _mm256_extractf128_ps(max256, 0); __m128 src128_1 = _mm256_extractf128_ps(max256, 1); __m128 src128 = _mm_shuffle_ps(src128_0, src128_1, (2 << 2) | (2 << 6)); max128 = _mm_max_ps(src128, max128); } } _mm_storeu_ps(&dst[out_index], max128); } } for (; j < out_w; ++j) { int out_index = j + out_w*(i + out_h*(k + c*b)); float max = -FLT_MAX; 
int max_i = -1;
                for (n = 0; n < size; ++n) {
                    for (m = 0; m < size; ++m) {
                        int cur_h = h_offset + i*stride + n;
                        int cur_w = w_offset + j*stride + m;
                        int index = cur_w + w*(cur_h + h*(k + b*c));
                        int valid = (cur_h >= 0 && cur_h < h && cur_w >= 0 && cur_w < w);
                        float val = (valid != 0) ? src[index] : -FLT_MAX;
                        max_i = (val > max) ? index : max_i;
                        max = (val > max) ? val : max;
                    }
                }
                dst[out_index] = max;
                indexes[out_index] = max_i;
            }
        }
    }
}
}

// http://graphics.stanford.edu/~seander/bithacks.html
// https://stackoverflow.com/questions/17354971/fast-counting-the-number-of-set-bits-in-m128i-register
// https://arxiv.org/pdf/1611.07612.pdf
static inline int popcnt128(__m128i n) {
    const __m128i n_hi = _mm_unpackhi_epi64(n, n);
#ifdef _MSC_VER
    return __popcnt64(_mm_cvtsi128_si64(n)) + __popcnt64(_mm_cvtsi128_si64(n_hi));
#else
    return __popcntq(_mm_cvtsi128_si64(n)) + __popcntq(_mm_cvtsi128_si64(n_hi));
#endif
}

static inline int popcnt256(__m256i n) {
    return popcnt128(_mm256_extractf128_si256(n, 0)) + popcnt128(_mm256_extractf128_si256(n, 1));
}

static inline __m256i count256(__m256i v) {
    __m256i lookup = _mm256_setr_epi8(0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
                                      0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4);
    __m256i low_mask = _mm256_set1_epi8(0x0f);
    __m256i lo = _mm256_and_si256(v, low_mask);
    __m256i hi = _mm256_and_si256(_mm256_srli_epi32(v, 4), low_mask);
    __m256i popcnt1 = _mm256_shuffle_epi8(lookup, lo);
    __m256i popcnt2 = _mm256_shuffle_epi8(lookup, hi);
    __m256i total = _mm256_add_epi8(popcnt1, popcnt2);
    return _mm256_sad_epu8(total, _mm256_setzero_si256());
}

static inline int popcnt256_custom(__m256i n) {
    __m256i val = count256(n);
    //return val.m256i_i64[0] + //val.m256i_i64[1] + //val.m256i_i64[2] + //val.m256i_i64[3];
    return _mm256_extract_epi64(val, 0)
        + _mm256_extract_epi64(val, 1)
        + _mm256_extract_epi64(val, 2)
        + _mm256_extract_epi64(val, 3);
}

// 5x times faster than gemm()-float32
// further optimizations: do mean-mult only for the last layer
void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
    unsigned char *A, int lda,
    unsigned char *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
#if defined(_OPENMP)
    static int max_num_threads = 0;
    if (max_num_threads == 0) {
        max_num_threads = omp_get_max_threads();
        //omp_set_num_threads(max_num_threads / 2);
    }
#endif

    int i;
#pragma omp parallel for
    for (i = 0; i < M; ++i) {   // l.n - filters [16 - 55 - 1024]
        float mean_val = mean_arr[i];
        int j, k;
        __m256i all_1 = _mm256_set1_epi8(255);

        for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
            int count = 0;
            const int bit_step = 256;
            __m256i count_sum = _mm256_set1_epi8(0);

            for (k = 0; k < K; k += bit_step) {   // l.size*l.size*l.c - one filter size [27 - 9216]
                __m256i a_bit256 = _mm256_loadu_si256((__m256i *)(A + (i*lda + k) / 8));
                __m256i b_bit256 = _mm256_loadu_si256((__m256i *)(B + (j*ldb + k) / 8));
                __m256i xor256 = _mm256_xor_si256(a_bit256, b_bit256); // xor(a,b)
                __m256i c_bit256 = _mm256_andnot_si256(xor256, all_1); // xnor = not(xor(a,b)); can be optimized - we can do one NOT for weights once and skip this NOT

                count_sum = _mm256_add_epi64(count256(c_bit256), count_sum); // Mula's algorithm
                //count += popcnt256(c_bit256);
                //binary_int64_printf(c_bit64);
                //printf(", count = %d \n\n", tmp_count);
            }

            // count of 1 bits
            //count = count_sum.m256i_i64[0] + count_sum.m256i_i64[1] + count_sum.m256i_i64[2] + count_sum.m256i_i64[3];
            count = _mm256_extract_epi64(count_sum, 0)
                + _mm256_extract_epi64(count_sum, 1) +
_mm256_extract_epi64(count_sum, 2) + _mm256_extract_epi64(count_sum, 3); int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step)); count = count - f1; // remove extra bits (from empty space for align only) C[i*ldc + j] = (2 * count - K) * mean_val; } } } void float_to_bit(float *src, unsigned char *dst, size_t size) { size_t dst_size = size / 8 + 1; memset(dst, 0, dst_size); size_t i; //__m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000); __m256 float_zero256 = _mm256_set1_ps(0.0); for (i = 0; i < size; i += 8) { //__m256i src256 = _mm256_loadu_si256((__m256i *)(&src[i])); //__m256i result256 = _mm256_and_si256(src256, all256_sing1); // check sign in 8 x 32-bit floats //uint32_t mask = _mm256_movemask_ps(_mm256_castsi256_ps(result256)); // (val >= 0) ? 0 : 1 ////mask = ~mask; // inverse mask, (val >= 0) ? 1 : 0 __m256 src256 = _mm256_loadu_ps((float *)(&src[i])); __m256 result256 = _mm256_cmp_ps(src256, float_zero256, _CMP_GT_OS); uint32_t mask = _mm256_movemask_ps(result256); // (val > 0) ? 0 : 1 dst[i / 8] = mask; } } #else // AVX void gemm_nn(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i, j, k; for (i = 0; i < M; ++i) { for (k = 0; k < K; ++k) { register float A_PART = ALPHA*A[i*lda + k]; for (j = 0; j < N; ++j) { C[i*ldc + j] += A_PART*B[k*ldb + j]; } } } } void gemm_nn_bin_32bit_packed(int M, int N, int K, float ALPHA, uint32_t *A, int lda, uint32_t *B, int ldb, float *C, int ldc, float *mean_arr) { int i; #pragma omp parallel for for (i = 0; i < M; ++i) { // l.n int j, s; float mean_val = mean_arr[i]; for (s = 0; s < K; ++s) // l.size*l.size*l.c/32 or (l.size*l.size*l.c) { register uint32_t A_PART = A[i*lda + s]; for (j = 0; j < N; ++j) // out_h*out_w; { register uint32_t B_PART = B[s*ldb + j]; uint32_t xnor_result = ~(A_PART ^ B_PART); int32_t count = popcnt_32(xnor_result); // must be Signed int C[i*ldc + j] += (2 * count - 32) * mean_val; } } } } //From Berkeley Vision's Caffe! //https://github.com/BVLC/caffe/blob/master/LICENSE void im2col_cpu_custom(float* data_im, int channels, int height, int width, int ksize, int stride, int pad, float* data_col) { im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); } //From Berkeley Vision's Caffe! 
//https://github.com/BVLC/caffe/blob/master/LICENSE void im2col_cpu_custom_bin(float* data_im, int channels, int height, int width, int ksize, int stride, int pad, float* data_col, int bit_align) { int c; const int height_col = (height + 2 * pad - ksize) / stride + 1; const int width_col = (width + 2 * pad - ksize) / stride + 1; const int channels_col = channels * ksize * ksize; // optimized version if (height_col == height && width_col == width && stride == 1 && pad == 1) { int new_ldb = bit_align; #pragma omp parallel for for (c = 0; c < channels_col; ++c) { int h, w; int w_offset = c % ksize; int h_offset = (c / ksize) % ksize; int c_im = c / ksize / ksize; for (h = pad; h < height_col - pad; ++h) { for (w = pad; w < width_col - pad - 8; w += 1) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; float val = data_im[im_col + width*(im_row + height*c_im)]; if (val > 0) set_bit(data_col, col_index); } for (; w < width_col - pad; ++w) { int im_row = h_offset + h - pad; int im_col = w_offset + w - pad; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)]; float val = data_im[im_col + width*(im_row + height*c_im)]; if (val > 0) set_bit(data_col, col_index); } } { w = 0; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); if (val > 0) set_bit(data_col, col_index); } } { w = width_col - 1; for (h = 0; h < height_col; ++h) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); if (val > 0) set_bit(data_col, col_index); } } { h = 0; for (w = 0; w < width_col; ++w) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); if (val > 0) set_bit(data_col, col_index); } } { h = height_col - 1; for (w = 0; w < width_col; ++w) { int im_row = h_offset + h; int im_col = w_offset + w; //int col_index = (c * height_col + h) * width_col + w; int col_index = c * new_ldb + h * width_col + w; //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad); if (val > 0) set_bit(data_col, col_index); } } } } else { printf("\n Error: is no non-optimized version \n"); //im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); // must be aligned for transpose after float_to_bin // float_to_bit(b, t_input, src_size); // transpose_bin(t_input, *t_bit_input, k, n, bit_align, new_ldb, 8); } } void 
activate_array_cpu_custom(float *x, const int n, const ACTIVATION a)
{
    int i = 0;
    if (a == LINEAR) {}
    else {
        for (i = 0; i < n; ++i) {
            x[i] = activate(x[i], a);
        }
    }
}

void forward_maxpool_layer_avx(float *src, float *dst, int *indexes, int size, int w, int h, int out_w, int out_h, int c, int pad, int stride, int batch)
{
    int b, k;
    const int w_offset = -pad / 2;
    const int h_offset = -pad / 2;

    for (b = 0; b < batch; ++b) {
        #pragma omp parallel for
        for (k = 0; k < c; ++k) {
            int i, j, m, n;
            for (i = 0; i < out_h; ++i) {
                for (j = 0; j < out_w; ++j) {
                    int out_index = j + out_w*(i + out_h*(k + c*b));
                    float max = -FLT_MAX;
                    int max_i = -1;
                    for (n = 0; n < size; ++n) {
                        for (m = 0; m < size; ++m) {
                            int cur_h = h_offset + i*stride + n;
                            int cur_w = w_offset + j*stride + m;
                            int index = cur_w + w*(cur_h + h*(k + b*c));
                            int valid = (cur_h >= 0 && cur_h < h && cur_w >= 0 && cur_w < w);
                            float val = (valid != 0) ? src[index] : -FLT_MAX;
                            max_i = (val > max) ? index : max_i;
                            max = (val > max) ? val : max;
                        }
                    }
                    dst[out_index] = max;
                    indexes[out_index] = max_i;
                }
            }
        }
    }
}

static inline int popcnt_64(uint64_t val64)
{
#ifdef WIN32   // Windows
#ifdef _WIN64  // Windows 64-bit
    int tmp_count = __popcnt64(val64);
#else          // Windows 32-bit
    int tmp_count = __popcnt(val64);
    tmp_count += __popcnt(val64 >> 32);
#endif
#else          // Linux
#ifdef __x86_64__  // Linux 64-bit
    int tmp_count = __builtin_popcountll(val64);
#else              // Linux 32-bit
    int tmp_count = __builtin_popcount(val64);
    tmp_count += __builtin_popcount(val64 >> 32); // count the high 32 bits too
#endif
#endif
    return tmp_count;
}

void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
    unsigned char *A, int lda,
    unsigned char *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    int i, j, k;

#pragma omp parallel for
    for (i = 0; i < M; ++i) {   // l.n - filters [16 - 55 - 1024]
        float mean_val = mean_arr[i];

        for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
            int count = 0;

            for (k = 0; k < K; k += 64) {   // l.size*l.size*l.c - one filter size [27 - 9216]
                uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8));
                uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8));
                uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64);

                int tmp_count = popcnt_64(c_bit64);

                if (K - k < 64) tmp_count = tmp_count - (64 - (K - k)); // remove extra bits
                count += tmp_count;
                //binary_int64_printf(c_bit64);
                //printf(", count = %d \n\n", tmp_count);
            }

            C[i*ldc + j] = (2 * count - K) * mean_val;
        }
    }
}

void float_to_bit(float *src, unsigned char *dst, size_t size)
{
    size_t dst_size = size / 8 + 1;
    memset(dst, 0, dst_size);
    size_t i;

    char *byte_arr = calloc(size, sizeof(char));
    for (i = 0; i < size; ++i) {
        if (src[i] > 0) byte_arr[i] = 1;
    }

    //for (i = 0; i < size; ++i) {
    //    dst[i / 8] |= byte_arr[i] << (i % 8);
    //}

    for (i = 0; i < size; i += 8) {
        char dst_tmp = 0;
        dst_tmp |= byte_arr[i + 0] << 0;
        dst_tmp |= byte_arr[i + 1] << 1;
        dst_tmp |= byte_arr[i + 2] << 2;
        dst_tmp |= byte_arr[i + 3] << 3;
        dst_tmp |= byte_arr[i + 4] << 4;
        dst_tmp |= byte_arr[i + 5] << 5;
        dst_tmp |= byte_arr[i + 6] << 6;
        dst_tmp |= byte_arr[i + 7] << 7;
        dst[i / 8] = dst_tmp;
    }
    free(byte_arr);
}
#endif // AVX

/*
void gemm_nn(int M, int N, int K, float ALPHA,
    float *A, int lda,
    float *B, int ldb,
    float *C, int ldc)
{
    int i, j, k;
    for (i = 0; i < M; ++i) {
        for (k = 0; k < K; ++k) {
            register float A_PART = ALPHA*A[i*lda + k];
            for (j = 0; j < N; ++j) {
                C[i*ldc + j] += A_PART*B[k*ldb + j];
            }
        }
    }
}
*/

void fill_cpu(int N, float ALPHA, float *X, int INCX)
{
    int i;
    for (i = 0; i < N; ++i) X[i*INCX] = ALPHA;
}

// --------------
utils.c -------------- // utils.c void error(const char *s) { perror(s); assert(0); exit(-1); } // utils.c void malloc_error() { fprintf(stderr, "Malloc error\n"); exit(-1); } // utils.c void file_error(char *s) { fprintf(stderr, "Couldn't open file: %s\n", s); exit(0); } // utils.c char *fgetl(FILE *fp) { if (feof(fp)) return 0; size_t size = 512; char *line = malloc(size * sizeof(char)); if (!fgets(line, size, fp)) { free(line); return 0; } size_t curr = strlen(line); while ((line[curr - 1] != '\n') && !feof(fp)) { if (curr == size - 1) { size *= 2; line = realloc(line, size * sizeof(char)); if (!line) { printf("%ld\n", (int long)size); malloc_error(); } } size_t readsize = size - curr; if (readsize > INT_MAX) readsize = INT_MAX - 1; fgets(&line[curr], readsize, fp); curr = strlen(line); } if (line[curr - 1] == '\n') line[curr - 1] = '\0'; return line; } // utils.c int *read_map(char *filename) { int n = 0; int *map = 0; char *str; FILE *file = fopen(filename, "r"); if (!file) file_error(filename); while ((str = fgetl(file))) { ++n; map = realloc(map, n * sizeof(int)); map[n - 1] = atoi(str); } return map; } // utils.c void del_arg(int argc, char **argv, int index) { int i; for (i = index; i < argc - 1; ++i) argv[i] = argv[i + 1]; argv[i] = 0; } // utils.c int find_arg(int argc, char* argv[], char *arg) { int i; for (i = 0; i < argc; ++i) { if (!argv[i]) continue; if (0 == strcmp(argv[i], arg)) { del_arg(argc, argv, i); return 1; } } return 0; } // utils.c int find_int_arg(int argc, char **argv, char *arg, int def) { int i; for (i = 0; i < argc - 1; ++i) { if (!argv[i]) continue; if (0 == strcmp(argv[i], arg)) { def = atoi(argv[i + 1]); del_arg(argc, argv, i); del_arg(argc, argv, i); break; } } return def; } // utils.c float find_float_arg(int argc, char **argv, char *arg, float def) { int i; for (i = 0; i < argc - 1; ++i) { if (!argv[i]) continue; if (0 == strcmp(argv[i], arg)) { def = atof(argv[i + 1]); del_arg(argc, argv, i); del_arg(argc, argv, i); break; } } return def; } // utils.c char *find_char_arg(int argc, char **argv, char *arg, char *def) { int i; for (i = 0; i < argc - 1; ++i) { if (!argv[i]) continue; if (0 == strcmp(argv[i], arg)) { def = argv[i + 1]; del_arg(argc, argv, i); del_arg(argc, argv, i); break; } } return def; } // utils.c void strip(char *s) { size_t i; size_t len = strlen(s); size_t offset = 0; for (i = 0; i < len; ++i) { char c = s[i]; if (c == ' ' || c == '\t' || c == '\n' || c == '\r') ++offset; else s[i - offset] = c; } s[len - offset] = '\0'; } // utils.c void list_insert(list *l, void *val) { node *new = malloc(sizeof(node)); new->val = val; new->next = 0; if (!l->back) { l->front = new; new->prev = 0; } else { l->back->next = new; new->prev = l->back; } l->back = new; ++l->size; } // utils.c float rand_uniform(float min, float max) { if (max < min) { float swap = min; min = max; max = swap; } return ((float)rand() / RAND_MAX * (max - min)) + min; } // utils.c float rand_scale(float s) { float scale = rand_uniform(1, s); if (rand() % 2) return scale; return 1. 
/ scale; } // utils.c int rand_int(int min, int max) { if (max < min) { int s = min; min = max; max = s; } int r = (rand() % (max - min + 1)) + min; return r; } // utils.c int constrain_int(int a, int min, int max) { if (a < min) return min; if (a > max) return max; return a; } // utils.c float dist_array(float *a, float *b, int n, int sub) { int i; float sum = 0; for (i = 0; i < n; i += sub) sum += powf(a[i] - b[i], 2); return sqrt(sum); } // utils.c float mag_array(float *a, int n) { int i; float sum = 0; for (i = 0; i < n; ++i) { sum += a[i] * a[i]; } return sqrt(sum); } // utils.c int max_index(float *a, int n) { if (n <= 0) return -1; int i, max_i = 0; float max = a[0]; for (i = 1; i < n; ++i) { if (a[i] > max) { max = a[i]; max_i = i; } } return max_i; } // utils.c // From http://en.wikipedia.org/wiki/Box%E2%80%93Muller_transform float rand_normal() { static int haveSpare = 0; static double rand1, rand2; if (haveSpare) { haveSpare = 0; return sqrt(rand1) * sin(rand2); } haveSpare = 1; rand1 = rand() / ((double)RAND_MAX); if (rand1 < 1e-100) rand1 = 1e-100; rand1 = -2 * log(rand1); rand2 = (rand() / ((double)RAND_MAX)) * TWO_PI; return sqrt(rand1) * cos(rand2); } // utils.c void free_ptrs(void **ptrs, int n) { int i; for (i = 0; i < n; ++i) free(ptrs[i]); free(ptrs); } // -------------- tree.c -------------- // tree.c void hierarchy_predictions(float *predictions, int n, tree *hier, int only_leaves) { int j; for (j = 0; j < n; ++j) { int parent = hier->parent[j]; if (parent >= 0) { predictions[j] *= predictions[parent]; } } if (only_leaves) { for (j = 0; j < n; ++j) { if (!hier->leaf[j]) predictions[j] = 0; } } } // tree.c tree *read_tree(char *filename) { tree t = { 0 }; FILE *fp = fopen(filename, "r"); char *line; int last_parent = -1; int group_size = 0; int groups = 0; int n = 0; while ((line = fgetl(fp)) != 0) { char *id = calloc(256, sizeof(char)); int parent = -1; sscanf(line, "%s %d", id, &parent); t.parent = realloc(t.parent, (n + 1) * sizeof(int)); t.parent[n] = parent; t.name = realloc(t.name, (n + 1) * sizeof(char *)); t.name[n] = id; if (parent != last_parent) { ++groups; t.group_offset = realloc(t.group_offset, groups * sizeof(int)); t.group_offset[groups - 1] = n - group_size; t.group_size = realloc(t.group_size, groups * sizeof(int)); t.group_size[groups - 1] = group_size; group_size = 0; last_parent = parent; } t.group = realloc(t.group, (n + 1) * sizeof(int)); t.group[n] = groups; ++n; ++group_size; } ++groups; t.group_offset = realloc(t.group_offset, groups * sizeof(int)); t.group_offset[groups - 1] = n - group_size; t.group_size = realloc(t.group_size, groups * sizeof(int)); t.group_size[groups - 1] = group_size; t.n = n; t.groups = groups; t.leaf = calloc(n, sizeof(int)); int i; for (i = 0; i < n; ++i) t.leaf[i] = 1; for (i = 0; i < n; ++i) if (t.parent[i] >= 0) t.leaf[t.parent[i]] = 0; fclose(fp); tree *tree_ptr = calloc(1, sizeof(tree)); *tree_ptr = t; //error(0); return tree_ptr; } // -------------- list.c -------------- // list.c list *make_list() { list *l = malloc(sizeof(list)); l->size = 0; l->front = 0; l->back = 0; return l; } // list.c list *get_paths(char *filename) { char *path; FILE *file = fopen(filename, "r"); if (!file) file_error(filename); list *lines = make_list(); while ((path = fgetl(file))) { list_insert(lines, path); } fclose(file); return lines; } // list.c void **list_to_array(list *l) { void **a = calloc(l->size, sizeof(void*)); int count = 0; node *n = l->front; while (n) { a[count++] = n->val; n = n->next; } return a; } // list.c void 
free_node(node *n) { node *next; while (n) { next = n->next; free(n); n = next; } } // list.c void free_list(list *l) { free_node(l->front); free(l); } // list.c char **get_labels(char *filename) { list *plist = get_paths(filename); char **labels = (char **)list_to_array(plist); free_list(plist); return labels; } // -------------- network.c -------------- // network.c float *get_network_output(network net) { int i; for (i = net.n - 1; i > 0; --i) if (net.layers[i].type != COST) break; return net.layers[i].output; } // network.c int get_network_output_size(network net) { int i; for (i = net.n - 1; i > 0; --i) if (net.layers[i].type != COST) break; return net.layers[i].outputs; } // network.c network make_network(int n) { network net = { 0 }; net.n = n; net.layers = calloc(net.n, sizeof(layer)); net.seen = calloc(1, sizeof(uint64_t)); return net; } void free_network(network net) { int i; for (i = 0; i < net.n; ++i) { free_layer(net.layers[i]); } free(net.layers); free(net.scales); free(net.steps); free(net.seen); free(net.workspace); } // network.c void set_batch_network(network *net, int b) { net->batch = b; int i; for (i = 0; i < net->n; ++i) { layer l = net->layers[i]; l.batch = b; } } // -------------- layer.c -------------- void free_layer(layer l) { if (l.type == DROPOUT) { if (l.rand) free(l.rand); return; } if (l.cweights) free(l.cweights); if (l.indexes) free(l.indexes); if (l.input_layers) free(l.input_layers); if (l.input_sizes) free(l.input_sizes); if (l.map) free(l.map); if (l.rand) free(l.rand); if (l.cost) free(l.cost); if (l.state) free(l.state); if (l.prev_state) free(l.prev_state); if (l.forgot_state) free(l.forgot_state); if (l.forgot_delta) free(l.forgot_delta); if (l.state_delta) free(l.state_delta); if (l.concat) free(l.concat); if (l.concat_delta) free(l.concat_delta); if (l.biases) free(l.biases); if (l.biases_quant) free(l.biases_quant); if (l.scales) free(l.scales); if (l.weights) free(l.weights); if (l.weights_int8) free(l.weights_int8); if (l.align_bit_weights) free(l.align_bit_weights); if (l.mean_arr) free(l.mean_arr); if (l.output) free(l.output); if (l.output_int8) free(l.output_int8); if (l.squared) free(l.squared); if (l.norms) free(l.norms); if (l.spatial_mean) free(l.spatial_mean); if (l.mean) free(l.mean); if (l.variance) free(l.variance); if (l.rolling_mean) free(l.rolling_mean); if (l.rolling_variance) free(l.rolling_variance); if (l.x) free(l.x); if (l.x_norm) free(l.x_norm); if (l.m) free(l.m); if (l.v) free(l.v); if (l.z_cpu) free(l.z_cpu); if (l.r_cpu) free(l.r_cpu); if (l.h_cpu) free(l.h_cpu); if (l.mask) free(l.mask); } // -------------- softmax_layer.c -------------- // softmax_layer.c softmax_layer make_softmax_layer(int batch, int inputs, int groups) { assert(inputs%groups == 0); fprintf(stderr, "softmax %4d\n", inputs); softmax_layer l = { 0 }; l.type = SOFTMAX; l.batch = batch; l.groups = groups; l.inputs = inputs; l.outputs = inputs; l.output = calloc(inputs*batch, sizeof(float)); return l; } // -------------- upsample_layer.c -------------- // upsample_layer.c layer make_upsample_layer(int batch, int w, int h, int c, int stride) { layer l = { 0 }; l.type = UPSAMPLE; l.batch = batch; l.w = w; l.h = h; l.c = c; l.out_w = w*stride; l.out_h = h*stride; l.out_c = c; if (stride < 0) { stride = -stride; l.reverse = 1; l.out_w = w / stride; l.out_h = h / stride; } l.stride = stride; l.outputs = l.out_w*l.out_h*l.out_c; l.inputs = l.w*l.h*l.c; l.output = calloc(l.outputs*batch, sizeof(float));; if (l.reverse) fprintf(stderr, "downsample %2dx %4d 
x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c); else fprintf(stderr, "upsample %2dx %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c); return l; } // -------------- shortcut_layer.c -------------- // shortcut.c layer make_shortcut_layer(int batch, int index, int w, int h, int c, int w2, int h2, int c2) { fprintf(stderr, "Shortcut Layer: %d\n", index); layer l = { 0 }; l.type = SHORTCUT; l.batch = batch; l.w = w2; l.h = h2; l.c = c2; l.out_w = w; l.out_h = h; l.out_c = c; l.outputs = w*h*c; l.inputs = l.outputs; l.index = index; l.output = calloc(l.outputs*batch, sizeof(float)); return l; } // -------------- reorg_layer.c -------------- // reorg_layer.c layer make_reorg_layer(int batch, int w, int h, int c, int stride, int reverse) { layer l = { 0 }; l.type = REORG; l.batch = batch; l.stride = stride; l.h = h; l.w = w; l.c = c; if (reverse) { l.out_w = w*stride; l.out_h = h*stride; l.out_c = c / (stride*stride); } else { l.out_w = w / stride; l.out_h = h / stride; l.out_c = c*(stride*stride); } l.reverse = reverse; fprintf(stderr, "reorg /%2d %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c); l.outputs = l.out_h * l.out_w * l.out_c; l.inputs = h*w*c; int output_size = l.out_h * l.out_w * l.out_c * batch; l.output = calloc(output_size, sizeof(float)); l.output_int8 = calloc(output_size, sizeof(int8_t)); return l; } // -------------- route_layer.c -------------- // route_layer.c route_layer make_route_layer(int batch, int n, int *input_layers, int *input_sizes) { fprintf(stderr, "route "); route_layer l = { 0 }; l.type = ROUTE; l.batch = batch; l.n = n; l.input_layers = input_layers; l.input_sizes = input_sizes; int i; int outputs = 0; for (i = 0; i < n; ++i) { fprintf(stderr, " %d", input_layers[i]); outputs += input_sizes[i]; } fprintf(stderr, "\n"); l.outputs = outputs; l.inputs = outputs; //l.delta = calloc(outputs*batch, sizeof(float)); l.output = calloc(outputs*batch, sizeof(float)); l.output_int8 = calloc(outputs*batch, sizeof(int8_t)); return l; } // -------------- yolo_layer.c -------------- layer make_yolo_layer(int batch, int w, int h, int n, int total, int *mask, int classes, int max_boxes) { int i; layer l = { 0 }; l.type = YOLO; l.n = n; l.total = total; l.batch = batch; l.h = h; l.w = w; l.c = n*(classes + 4 + 1); l.out_w = l.w; l.out_h = l.h; l.out_c = l.c; l.classes = classes; l.cost = calloc(1, sizeof(float)); l.biases = calloc(total * 2, sizeof(float)); if (mask) l.mask = mask; else { l.mask = calloc(n, sizeof(int)); for (i = 0; i < n; ++i) { l.mask[i] = i; } } l.outputs = h*w*n*(classes + 4 + 1); l.inputs = l.outputs; l.max_boxes = max_boxes; l.truths = l.max_boxes*(4 + 1); // 90*(4 + 1); l.output = calloc(batch*l.outputs, sizeof(float)); for (i = 0; i < total * 2; ++i) { l.biases[i] = .5; } fprintf(stderr, "yolo\n"); srand(0); return l; } // -------------- region_layer.c -------------- // region_layer.c region_layer make_region_layer(int batch, int w, int h, int n, int classes, int coords) { region_layer l = { 0 }; l.type = REGION; l.n = n; l.batch = batch; l.h = h; l.w = w; l.classes = classes; l.coords = coords; l.cost = calloc(1, sizeof(float)); l.biases = calloc(n * 2, sizeof(float)); l.outputs = h*w*n*(classes + coords + 1); l.inputs = l.outputs; l.truths = 30 * (5); l.output = calloc(batch*l.outputs, sizeof(float)); int i; for (i = 0; i < n * 2; ++i) { l.biases[i] = .5; } fprintf(stderr, "detection\n"); srand(0); return l; } // -------------- maxpool_layer.c -------------- // 
maxpool_layer.c maxpool_layer make_maxpool_layer(int batch, int h, int w, int c, int size, int stride, int padding) { maxpool_layer l = { 0 }; l.type = MAXPOOL; l.batch = batch; l.h = h; l.w = w; l.c = c; l.pad = padding; l.out_w = (w + padding - size) / stride + 1; l.out_h = (h + padding - size) / stride + 1; l.out_c = c; l.outputs = l.out_h * l.out_w * l.out_c; l.inputs = h*w*c; l.size = size; l.stride = stride; int output_size = l.out_h * l.out_w * l.out_c * batch; l.indexes = calloc(output_size, sizeof(int)); l.output = calloc(output_size, sizeof(float)); l.output_int8 = calloc(output_size, sizeof(int8_t)); fprintf(stderr, "max %d x %d / %d %4d x%4d x%4d -> %4d x%4d x%4d\n", size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c); return l; } // -------------- convolutional_layer.c -------------- // convolutional_layer.c size_t get_workspace_size(layer l) { return (size_t)l.out_h*l.out_w*l.size*l.size*l.c * sizeof(float); } int convolutional_out_height(convolutional_layer l) { return (l.h + 2 * l.pad - l.size) / l.stride + 1; } int convolutional_out_width(convolutional_layer l) { return (l.w + 2 * l.pad - l.size) / l.stride + 1; } // convolutional_layer.c convolutional_layer make_convolutional_layer(int batch, int h, int w, int c, int n, int size, int stride, int padding, ACTIVATION activation, int batch_normalize, int adam, int quantized) { int i; convolutional_layer l = { 0 }; l.type = CONVOLUTIONAL; l.quantized = quantized; l.h = h; l.w = w; l.c = c; l.n = n; l.batch = batch; l.stride = stride; l.size = size; l.pad = padding; l.batch_normalize = batch_normalize; l.weights = calloc(c*n*size*size, sizeof(float)); l.weights_int8 = calloc(c*n*size*size, sizeof(int8_t)); l.biases = calloc(n, sizeof(float)); l.biases_quant = calloc(n, sizeof(float)); float scale = sqrt(2. 
/ (size*size*c)); for (i = 0; i < c*n*size*size; ++i) l.weights[i] = scale*rand_uniform(-1, 1); int out_h = convolutional_out_height(l); int out_w = convolutional_out_width(l); l.out_h = out_h; l.out_w = out_w; l.out_c = n; l.outputs = l.out_h * l.out_w * l.out_c; l.inputs = l.w * l.h * l.c; l.output = calloc(l.batch*l.outputs, sizeof(float)); l.output_int8 = calloc(l.batch*l.outputs, sizeof(int8_t)); if (batch_normalize) { l.scales = calloc(n, sizeof(float)); for (i = 0; i < n; ++i) { l.scales[i] = 1; } l.mean = calloc(n, sizeof(float)); l.variance = calloc(n, sizeof(float)); l.rolling_mean = calloc(n, sizeof(float)); l.rolling_variance = calloc(n, sizeof(float)); l.x = calloc(l.batch*l.outputs, sizeof(float)); l.x_norm = calloc(l.batch*l.outputs, sizeof(float)); } if (adam) { l.adam = 1; l.m = calloc(c*n*size*size, sizeof(float)); l.v = calloc(c*n*size*size, sizeof(float)); } l.workspace_size = get_workspace_size(l); l.activation = activation; l.bflops = (2.0 * l.n * l.size*l.size*l.c * l.out_h*l.out_w) / 1000000000.; fprintf(stderr, "conv "); fprintf(stderr, "%5d %2d x%2d /%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BF\n", n, size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c, l.bflops); return l; } // -------------- image.c -------------- // image.c void rgbgr_image(image im) { int i; for (i = 0; i < im.w*im.h; ++i) { float swap = im.data[i]; im.data[i] = im.data[i + im.w*im.h * 2]; im.data[i + im.w*im.h * 2] = swap; } } // image.c image make_empty_image(int w, int h, int c) { image out; out.data = 0; out.h = h; out.w = w; out.c = c; return out; } // image.c void free_image(image m) { if (m.data) { free(m.data); } } // image.c void draw_box(image a, int x1, int y1, int x2, int y2, float r, float g, float b) { //normalize_image(a); int i; if (x1 < 0) x1 = 0; if (x1 >= a.w) x1 = a.w - 1; if (x2 < 0) x2 = 0; if (x2 >= a.w) x2 = a.w - 1; if (y1 < 0) y1 = 0; if (y1 >= a.h) y1 = a.h - 1; if (y2 < 0) y2 = 0; if (y2 >= a.h) y2 = a.h - 1; for (i = x1; i <= x2; ++i) { a.data[i + y1*a.w + 0 * a.w*a.h] = r; a.data[i + y2*a.w + 0 * a.w*a.h] = r; a.data[i + y1*a.w + 1 * a.w*a.h] = g; a.data[i + y2*a.w + 1 * a.w*a.h] = g; a.data[i + y1*a.w + 2 * a.w*a.h] = b; a.data[i + y2*a.w + 2 * a.w*a.h] = b; } for (i = y1; i <= y2; ++i) { a.data[x1 + i*a.w + 0 * a.w*a.h] = r; a.data[x2 + i*a.w + 0 * a.w*a.h] = r; a.data[x1 + i*a.w + 1 * a.w*a.h] = g; a.data[x2 + i*a.w + 1 * a.w*a.h] = g; a.data[x1 + i*a.w + 2 * a.w*a.h] = b; a.data[x2 + i*a.w + 2 * a.w*a.h] = b; } } // image.c void draw_box_width(image a, int x1, int y1, int x2, int y2, int w, float r, float g, float b) { int i; for (i = 0; i < w; ++i) { draw_box(a, x1 + i, y1 + i, x2 - i, y2 - i, r, g, b); } } // image.c image make_image(int w, int h, int c) { image out = make_empty_image(w, h, c); out.data = calloc(h*w*c, sizeof(float)); return out; } // image.c float get_pixel(image m, int x, int y, int c) { assert(x < m.w && y < m.h && c < m.c); return m.data[c*m.h*m.w + y*m.w + x]; } // image.c void set_pixel(image m, int x, int y, int c, float val) { if (x < 0 || y < 0 || c < 0 || x >= m.w || y >= m.h || c >= m.c) return; assert(x < m.w && y < m.h && c < m.c); m.data[c*m.h*m.w + y*m.w + x] = val; } // image.c void add_pixel(image m, int x, int y, int c, float val) { assert(x < m.w && y < m.h && c < m.c); m.data[c*m.h*m.w + y*m.w + x] += val; } // image.c image resize_image(image im, int w, int h) { image resized = make_image(w, h, im.c); image part = make_image(w, im.h, im.c); int r, c, k; float w_scale = (float)(im.w - 1) / (w - 1); float h_scale = 
(float)(im.h - 1) / (h - 1); for (k = 0; k < im.c; ++k) { for (r = 0; r < im.h; ++r) { for (c = 0; c < w; ++c) { float val = 0; if (c == w - 1 || im.w == 1) { val = get_pixel(im, im.w - 1, r, k); } else { float sx = c*w_scale; int ix = (int)sx; float dx = sx - ix; val = (1 - dx) * get_pixel(im, ix, r, k) + dx * get_pixel(im, ix + 1, r, k); } set_pixel(part, c, r, k, val); } } } for (k = 0; k < im.c; ++k) { for (r = 0; r < h; ++r) { float sy = r*h_scale; int iy = (int)sy; float dy = sy - iy; for (c = 0; c < w; ++c) { float val = (1 - dy) * get_pixel(part, c, iy, k); set_pixel(resized, c, r, k, val); } if (r == h - 1 || im.h == 1) continue; for (c = 0; c < w; ++c) { float val = dy * get_pixel(part, c, iy + 1, k); add_pixel(resized, c, r, k, val); } } } free_image(part); return resized; } // image.c image load_image(char *filename, int w, int h, int c) { image out = load_image_stb(filename, c); if ((h && w) && (h != out.h || w != out.w)) { image resized = resize_image(out, w, h); free_image(out); out = resized; } return out; } // image.c image load_image_stb(char *filename, int channels) { int w, h, c; unsigned char *data = stbi_load(filename, &w, &h, &c, channels); if (!data) { fprintf(stderr, "Cannot load image \"%s\"\nSTB Reason: %s\n", filename, stbi_failure_reason()); exit(0); } if (channels) c = channels; int i, j, k; image im = make_image(w, h, c); for (k = 0; k < c; ++k) { for (j = 0; j < h; ++j) { for (i = 0; i < w; ++i) { int dst_index = i + w*j + w*h*k; int src_index = k + c*i + c*w*j; im.data[dst_index] = (float)data[src_index] / 255.; } } } free(data); return im; } // image.c image copy_image(image p) { image copy = p; copy.data = calloc(p.h*p.w*p.c, sizeof(float)); memcpy(copy.data, p.data, p.h*p.w*p.c * sizeof(float)); return copy; } // image.c void constrain_image(image im) { int i; for (i = 0; i < im.w*im.h*im.c; ++i) { if (im.data[i] < 0) im.data[i] = 0; if (im.data[i] > 1) im.data[i] = 1; } } // image.c void save_image_png(image im, const char *name) { char buff[256]; sprintf(buff, "%s.png", name); unsigned char *data = calloc(im.w*im.h*im.c, sizeof(char)); int i, k; for (k = 0; k < im.c; ++k) { for (i = 0; i < im.w*im.h; ++i) { data[i*im.c + k] = (unsigned char)(255 * im.data[i + k*im.w*im.h]); } } int success = stbi_write_png(buff, im.w, im.h, im.c, data, im.w*im.c); free(data); if (!success) fprintf(stderr, "Failed to write image %s\n", buff); } // image.c void show_image(image p, const char *name) { fprintf(stderr, "Not compiled with OpenCV, saving to %s.png instead\n", name); save_image_png(p, name); } // image.c float get_color(int c, int x, int max) { static float colors[6][3] = { { 1,0,1 },{ 0,0,1 },{ 0,1,1 },{ 0,1,0 },{ 1,1,0 },{ 1,0,0 } }; float ratio = ((float)x / max) * 5; int i = floor(ratio); int j = ceil(ratio); ratio -= i; float r = (1 - ratio) * colors[i][c] + ratio*colors[j][c]; //printf("%f\n", r); return r; } // -------------- option_list.c -------------------- // option_list.c typedef struct { char *key; char *val; int used; } kvp; // option_list.c void option_insert(list *l, char *key, char *val) { kvp *p = malloc(sizeof(kvp)); p->key = key; p->val = val; p->used = 0; list_insert(l, p); } // option_list.c int read_option(char *s, list *options) { size_t i; size_t len = strlen(s); char *val = 0; for (i = 0; i < len; ++i) { if (s[i] == '=') { s[i] = '\0'; val = s + i + 1; break; } } if (i == len - 1) return 0; char *key = s; option_insert(options, key, val); return 1; } // option_list.c list *read_data_cfg(char *filename) { FILE *file = fopen(filename, 
"r"); if (file == 0) file_error(filename); char *line; int nu = 0; list *options = make_list(); while ((line = fgetl(file)) != 0) { ++nu; strip(line); switch (line[0]) { case '\0': case '#': case ';': free(line); break; default: if (!read_option(line, options)) { fprintf(stderr, "Config file error line %d, could parse: %s\n", nu, line); free(line); } break; } } fclose(file); return options; } // option_list.c void option_unused(list *l) { node *n = l->front; while (n) { kvp *p = (kvp *)n->val; if (!p->used) { fprintf(stderr, "Unused field: '%s = %s'\n", p->key, p->val); } n = n->next; } } // option_list.c char *option_find(list *l, char *key) { node *n = l->front; while (n) { kvp *p = (kvp *)n->val; if (strcmp(p->key, key) == 0) { p->used = 1; return p->val; } n = n->next; } return 0; } // option_list.c char *option_find_str(list *l, char *key, char *def) { char *v = option_find(l, key); if (v) return v; if (def) fprintf(stderr, "%s: Using default '%s'\n", key, def); return def; } // option_list.c int option_find_int(list *l, char *key, int def) { char *v = option_find(l, key); if (v) return atoi(v); fprintf(stderr, "%s: Using default '%d'\n", key, def); return def; } // option_list.c int option_find_int_quiet(list *l, char *key, int def) { char *v = option_find(l, key); if (v) return atoi(v); return def; } // option_list.c float option_find_float_quiet(list *l, char *key, float def) { char *v = option_find(l, key); if (v) return atof(v); return def; } // option_list.c float option_find_float(list *l, char *key, float def) { char *v = option_find(l, key); if (v) return atof(v); fprintf(stderr, "%s: Using default '%lf'\n", key, def); return def; } // -------------- parser.c -------------------- // parser.c typedef struct size_params { int quantized; int batch; int inputs; int h; int w; int c; int index; int time_steps; network net; } size_params; // parser.c typedef struct { char *type; list *options; }section; // parser.c list *read_cfg(char *filename) { FILE *file = fopen(filename, "r"); if (file == 0) file_error(filename); char *line; int nu = 0; list *sections = make_list(); section *current = 0; while ((line = fgetl(file)) != 0) { ++nu; strip(line); switch (line[0]) { case '[': current = malloc(sizeof(section)); list_insert(sections, current); current->options = make_list(); current->type = line; break; case '\0': case '#': case ';': free(line); break; default: if (!read_option(line, current->options)) { fprintf(stderr, "Config file error line %d, could parse: %s\n", nu, line); free(line); } break; } } fclose(file); return sections; } // parser.c void load_convolutional_weights_cpu(layer l, FILE *fp) { int num = l.n*l.c*l.size*l.size; fread(l.biases, sizeof(float), l.n, fp); if (l.batch_normalize && (!l.dontloadscales)) { fread(l.scales, sizeof(float), l.n, fp); fread(l.rolling_mean, sizeof(float), l.n, fp); fread(l.rolling_variance, sizeof(float), l.n, fp); } fread(l.weights, sizeof(float), num, fp); } // parser.c void load_weights_upto_cpu(network *net, char *filename, int cutoff) { fprintf(stderr, "Loading weights from %s...", filename); fflush(stdout); FILE *fp = fopen(filename, "rb"); if (!fp) file_error(filename); int major; int minor; int revision; fread(&major, sizeof(int), 1, fp); fread(&minor, sizeof(int), 1, fp); fread(&revision, sizeof(int), 1, fp); if ((major * 10 + minor) >= 2) { fread(net->seen, sizeof(uint64_t), 1, fp); } else { int iseen = 0; fread(&iseen, sizeof(int), 1, fp); *net->seen = iseen; } //int transpose = (major > 1000) || (minor > 1000); int i; for (i = 0; 
i < net->n && i < cutoff; ++i) { layer l = net->layers[i]; if (l.dontload) continue; if (l.type == CONVOLUTIONAL) { load_convolutional_weights_cpu(l, fp); } } fprintf(stderr, "Done!\n"); fclose(fp); } // parser.c convolutional_layer parse_convolutional(list *options, size_params params) { int n = option_find_int(options, "filters", 1); int size = option_find_int(options, "size", 1); int stride = option_find_int(options, "stride", 1); int pad = option_find_int_quiet(options, "pad", 0); int padding = option_find_int_quiet(options, "padding", 0); if (pad) padding = size / 2; char *activation_s = option_find_str(options, "activation", "logistic"); ACTIVATION activation = get_activation(activation_s); int batch, h, w, c; h = params.h; w = params.w; c = params.c; batch = params.batch; if (!(h && w && c)) error("Layer before convolutional layer must output image."); int batch_normalize = option_find_int_quiet(options, "batch_normalize", 0); int quantized = params.quantized; if (params.index == 0 || activation == LINEAR || (params.index > 1 && stride>1) || size==1) quantized = 0; // disable Quantized for 1st and last layers convolutional_layer layer = make_convolutional_layer(batch, h, w, c, n, size, stride, padding, activation, batch_normalize, params.net.adam, quantized); layer.flipped = option_find_int_quiet(options, "flipped", 0); layer.dot = option_find_float_quiet(options, "dot", 0); if (params.net.adam) { layer.B1 = params.net.B1; layer.B2 = params.net.B2; layer.eps = params.net.eps; } return layer; } // parser.c layer parse_region(list *options, size_params params) { int coords = option_find_int(options, "coords", 4); int classes = option_find_int(options, "classes", 20); int num = option_find_int(options, "num", 1); layer l = make_region_layer(params.batch, params.w, params.h, num, classes, coords); assert(l.outputs == params.inputs); l.log = option_find_int_quiet(options, "log", 0); l.sqrt = option_find_int_quiet(options, "sqrt", 0); l.softmax = option_find_int(options, "softmax", 0); l.max_boxes = option_find_int_quiet(options, "max", 30); l.jitter = option_find_float(options, "jitter", .2); l.rescore = option_find_int_quiet(options, "rescore", 0); l.thresh = option_find_float(options, "thresh", .5); l.classfix = option_find_int_quiet(options, "classfix", 0); l.absolute = option_find_int_quiet(options, "absolute", 0); l.random = option_find_int_quiet(options, "random", 0); l.coord_scale = option_find_float(options, "coord_scale", 1); l.object_scale = option_find_float(options, "object_scale", 1); l.noobject_scale = option_find_float(options, "noobject_scale", 1); l.class_scale = option_find_float(options, "class_scale", 1); l.bias_match = option_find_int_quiet(options, "bias_match", 0); char *tree_file = option_find_str(options, "tree", 0); if (tree_file) l.softmax_tree = read_tree(tree_file); char *map_file = option_find_str(options, "map", 0); if (map_file) l.map = read_map(map_file); char *a = option_find_str(options, "anchors", 0); if (a) { int len = strlen(a); int n = 1; int i; for (i = 0; i < len; ++i) { if (a[i] == ',') ++n; } for (i = 0; i < n; ++i) { float bias = atof(a); l.biases[i] = bias; a = strchr(a, ',') + 1; } } return l; } // parser.c int *parse_yolo_mask(char *a, int *num) { int *mask = 0; if (a) { int len = strlen(a); int n = 1; int i; for (i = 0; i < len; ++i) { if (a[i] == ',') ++n; } mask = calloc(n, sizeof(int)); for (i = 0; i < n; ++i) { int val = atoi(a); mask[i] = val; a = strchr(a, ',') + 1; } *num = n; } return mask; } // parser.c layer parse_yolo(list 
*options, size_params params) { int classes = option_find_int(options, "classes", 20); int total = option_find_int(options, "num", 1); int num = total; char *a = option_find_str(options, "mask", 0); int *mask = parse_yolo_mask(a, &num); int max_boxes = option_find_int_quiet(options, "max", 90); layer l = make_yolo_layer(params.batch, params.w, params.h, num, total, mask, classes, max_boxes); if (l.outputs != params.inputs) { printf("Error: l.outputs == params.inputs \n"); printf("filters= in the [convolutional]-layer doesn't correspond to classes= or mask= in [yolo]-layer \n"); exit(EXIT_FAILURE); } //assert(l.outputs == params.inputs); char *map_file = option_find_str(options, "map", 0); if (map_file) l.map = read_map(map_file); l.jitter = option_find_float(options, "jitter", .2); l.focal_loss = option_find_int_quiet(options, "focal_loss", 0); l.ignore_thresh = option_find_float(options, "ignore_thresh", .5); l.truth_thresh = option_find_float(options, "truth_thresh", 1); l.random = option_find_int_quiet(options, "random", 0); a = option_find_str(options, "anchors", 0); if (a) { int len = strlen(a); int n = 1; int i; for (i = 0; i < len; ++i) { if (a[i] == ',') ++n; } for (i = 0; i < n && i < total * 2; ++i) { float bias = atof(a); l.biases[i] = bias; a = strchr(a, ',') + 1; } } return l; } // parser.c softmax_layer parse_softmax(list *options, size_params params) { int groups = option_find_int_quiet(options, "groups", 1); softmax_layer layer = make_softmax_layer(params.batch, params.inputs, groups); layer.temperature = option_find_float_quiet(options, "temperature", 1); char *tree_file = option_find_str(options, "tree", 0); if (tree_file) layer.softmax_tree = read_tree(tree_file); return layer; } // parser.c maxpool_layer parse_maxpool(list *options, size_params params) { int stride = option_find_int(options, "stride", 1); int size = option_find_int(options, "size", stride); int padding = option_find_int_quiet(options, "padding", size - 1); int batch, h, w, c; h = params.h; w = params.w; c = params.c; batch = params.batch; if (!(h && w && c)) error("Layer before maxpool layer must output image."); maxpool_layer layer = make_maxpool_layer(batch, h, w, c, size, stride, padding); return layer; } // parser.c layer parse_reorg(list *options, size_params params) { int stride = option_find_int(options, "stride", 1); int reverse = option_find_int_quiet(options, "reverse", 0); int batch, h, w, c; h = params.h; w = params.w; c = params.c; batch = params.batch; if (!(h && w && c)) error("Layer before reorg layer must output image."); layer layer = make_reorg_layer(batch, w, h, c, stride, reverse); return layer; } // parser.c layer parse_upsample(list *options, size_params params, network net) { int stride = option_find_int(options, "stride", 2); layer l = make_upsample_layer(params.batch, params.w, params.h, params.c, stride); l.scale = option_find_float_quiet(options, "scale", 1); return l; } // parser.c layer parse_shortcut(list *options, size_params params, network net) { char *l = option_find(options, "from"); int index = atoi(l); if (index < 0) index = params.index + index; int batch = params.batch; layer from = net.layers[index]; layer s = make_shortcut_layer(batch, index, params.w, params.h, params.c, from.out_w, from.out_h, from.out_c); char *activation_s = option_find_str(options, "activation", "linear"); ACTIVATION activation = get_activation(activation_s); s.activation = activation; return s; } // parser.c route_layer parse_route(list *options, size_params params, network net) { char *l 
= option_find(options, "layers"); if (!l) error("Route Layer must specify input layers"); int len = strlen(l); int n = 1; int i; for (i = 0; i < len; ++i) { if (l[i] == ',') ++n; } int *layers = calloc(n, sizeof(int)); int *sizes = calloc(n, sizeof(int)); for (i = 0; i < n; ++i) { int index = atoi(l); l = strchr(l, ',') + 1; if (index < 0) index = params.index + index; layers[i] = index; sizes[i] = net.layers[index].outputs; } int batch = params.batch; route_layer layer = make_route_layer(batch, n, layers, sizes); convolutional_layer first = net.layers[layers[0]]; layer.out_w = first.out_w; layer.out_h = first.out_h; layer.out_c = first.out_c; for (i = 1; i < n; ++i) { int index = layers[i]; convolutional_layer next = net.layers[index]; if (next.out_w == first.out_w && next.out_h == first.out_h) { layer.out_c += next.out_c; } else { layer.out_h = layer.out_w = layer.out_c = 0; } } return layer; } // parser.c void free_section(section *s) { free(s->type); node *n = s->options->front; while (n) { kvp *pair = (kvp *)n->val; free(pair->key); free(pair); node *next = n->next; free(n); n = next; } free(s->options); free(s); } // parser.c LAYER_TYPE string_to_layer_type(char * type) { if (strcmp(type, "[yolo]") == 0) return YOLO; if (strcmp(type, "[region]") == 0) return REGION; if (strcmp(type, "[conv]") == 0 || strcmp(type, "[convolutional]") == 0) return CONVOLUTIONAL; if (strcmp(type, "[net]") == 0 || strcmp(type, "[network]") == 0) return NETWORK; if (strcmp(type, "[max]") == 0 || strcmp(type, "[maxpool]") == 0) return MAXPOOL; if (strcmp(type, "[reorg]") == 0) return REORG; if (strcmp(type, "[upsample]") == 0) return UPSAMPLE; if (strcmp(type, "[shortcut]") == 0) return SHORTCUT; if (strcmp(type, "[soft]") == 0 || strcmp(type, "[softmax]") == 0) return SOFTMAX; if (strcmp(type, "[route]") == 0) return ROUTE; return BLANK; } // parser.c learning_rate_policy get_policy(char *s) { if (strcmp(s, "random") == 0) return RANDOM; if (strcmp(s, "poly") == 0) return POLY; if (strcmp(s, "constant") == 0) return CONSTANT; if (strcmp(s, "step") == 0) return STEP; if (strcmp(s, "exp") == 0) return EXP; if (strcmp(s, "sigmoid") == 0) return SIG; if (strcmp(s, "steps") == 0) return STEPS; fprintf(stderr, "Couldn't find policy %s, going with constant\n", s); return CONSTANT; } // parser.c void parse_net_options(list *options, network *net) { net->batch = option_find_int(options, "batch", 1); net->learning_rate = option_find_float(options, "learning_rate", .001); net->momentum = option_find_float(options, "momentum", .9); net->decay = option_find_float(options, "decay", .0001); int subdivs = option_find_int(options, "subdivisions", 1); net->time_steps = option_find_int_quiet(options, "time_steps", 1); net->batch /= subdivs; net->batch *= net->time_steps; net->subdivisions = subdivs; char *a = option_find_str(options, "input_calibration", 0); if (a) { int len = strlen(a); int n = 1; int i; for (i = 0; i < len; ++i) { if (a[i] == ',') ++n; } net->input_calibration_size = n; net->input_calibration = (float *)calloc(n, sizeof(float)); for (i = 0; i < n; ++i) { float coef = atof(a); net->input_calibration[i] = coef; a = strchr(a, ',') + 1; } } net->adam = option_find_int_quiet(options, "adam", 0); if (net->adam) { net->B1 = option_find_float(options, "B1", .9); net->B2 = option_find_float(options, "B2", .999); net->eps = option_find_float(options, "eps", .000001); } net->h = option_find_int_quiet(options, "height", 0); net->w = option_find_int_quiet(options, "width", 0); net->c = option_find_int_quiet(options, 
"channels", 0); net->inputs = option_find_int_quiet(options, "inputs", net->h * net->w * net->c); net->max_crop = option_find_int_quiet(options, "max_crop", net->w * 2); net->min_crop = option_find_int_quiet(options, "min_crop", net->w); net->angle = option_find_float_quiet(options, "angle", 0); net->aspect = option_find_float_quiet(options, "aspect", 1); net->saturation = option_find_float_quiet(options, "saturation", 1); net->exposure = option_find_float_quiet(options, "exposure", 1); net->hue = option_find_float_quiet(options, "hue", 0); if (!net->inputs && !(net->h && net->w && net->c)) error("No input parameters supplied"); char *policy_s = option_find_str(options, "policy", "constant"); net->policy = get_policy(policy_s); net->burn_in = option_find_int_quiet(options, "burn_in", 0); if (net->policy == STEP) { net->step = option_find_int(options, "step", 1); net->scale = option_find_float(options, "scale", 1); } else if (net->policy == STEPS) { char *l = option_find(options, "steps"); char *p = option_find(options, "scales"); if (!l || !p) error("STEPS policy must have steps and scales in cfg file"); int len = strlen(l); int n = 1; int i; for (i = 0; i < len; ++i) { if (l[i] == ',') ++n; } int *steps = calloc(n, sizeof(int)); float *scales = calloc(n, sizeof(float)); for (i = 0; i < n; ++i) { int step = atoi(l); float scale = atof(p); l = strchr(l, ',') + 1; p = strchr(p, ',') + 1; steps[i] = step; scales[i] = scale; } net->scales = scales; net->steps = steps; net->num_steps = n; } else if (net->policy == EXP) { net->gamma = option_find_float(options, "gamma", 1); } else if (net->policy == SIG) { net->gamma = option_find_float(options, "gamma", 1); net->step = option_find_int(options, "step", 1); } else if (net->policy == POLY || net->policy == RANDOM) { net->power = option_find_float(options, "power", 1); } net->max_batches = option_find_int(options, "max_batches", 0); } // parser.c network parse_network_cfg(char *filename, int batch, int quantized) { list *sections = read_cfg(filename); node *n = sections->front; if (!n) error("Config file has no sections"); network net = make_network(sections->size - 1); net.quantized = quantized; net.do_input_calibration = 0; net.gpu_index = gpu_index; size_params params; params.quantized = quantized; section *s = (section *)n->val; list *options = s->options; if (strcmp(s->type, "[net]") == 0 && strcmp(s->type, "[network]") == 0) error("First section must be [net] or [network]"); parse_net_options(options, &net); params.h = net.h; params.w = net.w; params.c = net.c; params.inputs = net.inputs; if (batch > 0) net.batch = batch; params.batch = net.batch; params.time_steps = net.time_steps; params.net = net; size_t workspace_size = 0; n = n->next; int count = 0; free_section(s); fprintf(stderr, "layer filters size input output\n"); while (n) { params.index = count; fprintf(stderr, "%5d ", count); s = (section *)n->val; options = s->options; layer l = { 0 }; LAYER_TYPE lt = string_to_layer_type(s->type); if (lt == CONVOLUTIONAL) { // if(count == 80) params.quantized = 0; // doesn't lost GPU - mAP = 45.61% node *tmp = n->next; if(tmp) tmp = tmp->next; if (tmp) { if (string_to_layer_type(((section *)tmp->val)->type) == YOLO) { params.quantized = 0; // mAP = 53.60% //printf("\n\n i = %d \n\n", count); } } l = parse_convolutional(options, params); } else if (lt == REGION) { l = parse_region(options, params); } else if (lt == YOLO) { l = parse_yolo(options, params); } else if (lt == SOFTMAX) { l = parse_softmax(options, params); net.hierarchy = 
l.softmax_tree; } else if (lt == MAXPOOL) { l = parse_maxpool(options, params); } else if (lt == REORG) { l = parse_reorg(options, params); } else if (lt == ROUTE) { l = parse_route(options, params, net); } else if (lt == UPSAMPLE) { l = parse_upsample(options, params, net); } else if (lt == SHORTCUT) { l = parse_shortcut(options, params, net); } else { fprintf(stderr, "Type not recognized: %s\n", s->type); } l.dontload = option_find_int_quiet(options, "dontload", 0); l.dontloadscales = option_find_int_quiet(options, "dontloadscales", 0); option_unused(options); net.layers[count] = l; if (l.workspace_size > workspace_size) workspace_size = l.workspace_size; free_section(s); n = n->next; ++count; if (n) { params.h = l.out_h; params.w = l.out_w; params.c = l.out_c; params.inputs = l.outputs; } } free_list(sections); net.outputs = get_network_output_size(net); net.output = get_network_output(net); if (workspace_size) { //printf("%ld\n", workspace_size); net.workspace = calloc(1, workspace_size); } return net; } // -------------- gettimeofday for Windows-------------------- #if defined(_MSC_VER) int gettimeofday(struct timeval *tv, struct timezone *tz) { FILETIME ft; unsigned __int64 tmpres = 0; static int tzflag; if (NULL != tv) { GetSystemTimeAsFileTime(&ft); tmpres |= ft.dwHighDateTime; tmpres <<= 32; tmpres |= ft.dwLowDateTime; /*converting file time to unix epoch*/ tmpres -= DELTA_EPOCH_IN_MICROSECS; tmpres /= 10; /*convert into microseconds*/ tv->tv_sec = (long)(tmpres / 1000000UL); tv->tv_usec = (long)(tmpres % 1000000UL); } if (NULL != tz) { if (!tzflag) { _tzset(); tzflag++; } tz->tz_minuteswest = _timezone / 60; tz->tz_dsttime = _daylight; } return 0; } #endif // _MSC_VER // ------------------------------------------------------ // Calculate mAP and TP/FP/FN, IoU, F1 #include "pthread.h" //#include "box.h" /* // from: box.h typedef struct { float x, y, w, h; } box; */ float box_iou(box a, box b); typedef enum { CLASSIFICATION_DATA, DETECTION_DATA, CAPTCHA_DATA, REGION_DATA, IMAGE_DATA, LETTERBOX_DATA, COMPARE_DATA, WRITING_DATA, SWAG_DATA, TAG_DATA, OLD_CLASSIFICATION_DATA, STUDY_DATA, DET_DATA, SUPER_DATA } data_type; typedef struct matrix { int rows, cols; float **vals; } matrix; typedef struct { int w, h; matrix X; matrix y; int shallow; int *num_boxes; box **boxes; } data; typedef struct { int id; float x, y, w, h; float left, right, top, bottom; } box_label; typedef struct load_args { int threads; char **paths; char *path; int n; int m; char **labels; int h; int w; int out_w; int out_h; int nh; int nw; int num_boxes; int min, max, size; int classes; int background; int scale; int small_object; float jitter; int flip; float angle; float aspect; float saturation; float exposure; float hue; data *d; image *im; image *resized; data_type type; tree *hierarchy; } load_args; int entry_index(layer l, int batch, int location, int entry) { int n = location / (l.w*l.h); int loc = location % (l.w*l.h); return batch*l.outputs + n*l.w*l.h*(4 + l.classes + 1) + entry*l.w*l.h + loc; } int yolo_num_detections(layer l, float thresh) { int i, n; int count = 0; for (i = 0; i < l.w*l.h; ++i) { for (n = 0; n < l.n; ++n) { int obj_index = entry_index(l, 0, n*l.w*l.h + i, 4); if (l.output[obj_index] > thresh) { ++count; } } } return count; } int num_detections(network *net, float thresh) { int i; int s = 0; for (i = 0; i < net->n; ++i) { layer l = net->layers[i]; if (l.type == YOLO) { s += yolo_num_detections(l, thresh); } if (l.type == DETECTION || l.type == REGION) { s += l.w*l.h*l.n; } } return s; } 
detection *make_network_boxes(network *net, float thresh, int *num) { layer l = net->layers[net->n - 1]; int i; int nboxes = num_detections(net, thresh); if (num) *num = nboxes; detection *dets = calloc(nboxes, sizeof(detection)); for (i = 0; i < nboxes; ++i) { dets[i].prob = calloc(l.classes, sizeof(float)); if (l.coords > 4) { dets[i].mask = calloc(l.coords - 4, sizeof(float)); } } return dets; } void free_detections(detection *dets, int n) { int i; for (i = 0; i < n; ++i) { free(dets[i].prob); if (dets[i].mask) free(dets[i].mask); } free(dets); } void find_replace(char *str, char *orig, char *rep, char *output) { char buffer[4096] = { 0 }; char *p; sprintf(buffer, "%s", str); if (!(p = strstr(buffer, orig))) { // Is 'orig' even in 'str'? sprintf(output, "%s", str); return; } *p = '\0'; sprintf(output, "%s%s%s", buffer, rep, p + strlen(orig)); } void correct_yolo_boxes(detection *dets, int n, int w, int h, int netw, int neth, int relative, int letter) { int i; int new_w = 0; int new_h = 0; if (letter) { if (((float)netw / w) < ((float)neth / h)) { new_w = netw; new_h = (h * netw) / w; } else { new_h = neth; new_w = (w * neth) / h; } } else { new_w = netw; new_h = neth; } for (i = 0; i < n; ++i) { box b = dets[i].bbox; b.x = (b.x - (netw - new_w) / 2. / netw) / ((float)new_w / netw); b.y = (b.y - (neth - new_h) / 2. / neth) / ((float)new_h / neth); b.w *= (float)netw / new_w; b.h *= (float)neth / new_h; if (!relative) { b.x *= w; b.w *= w; b.y *= h; b.h *= h; } dets[i].bbox = b; } } // yolo_layer.c box get_yolo_box(float *x, float *biases, int n, int index, int i, int j, int lw, int lh, int w, int h, int stride) { box b; b.x = (i + x[index + 0 * stride]) / lw; b.y = (j + x[index + 1 * stride]) / lh; b.w = exp(x[index + 2 * stride]) * biases[2 * n] / w; b.h = exp(x[index + 3 * stride]) * biases[2 * n + 1] / h; return b; } // yolo_layer.c int get_yolo_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, int relative, detection *dets, int letter) { int i, j, n; float *predictions = l.output; //if (l.batch == 2) avg_flipped_yolo(l); int count = 0; for (i = 0; i < l.w*l.h; ++i) { int row = i / l.w; int col = i % l.w; for (n = 0; n < l.n; ++n) { int obj_index = entry_index(l, 0, n*l.w*l.h + i, 4); float objectness = predictions[obj_index]; //if (objectness <= thresh) continue; // incorrect behavior for Nan values if (objectness > thresh) { int box_index = entry_index(l, 0, n*l.w*l.h + i, 0); dets[count].bbox = get_yolo_box(predictions, l.biases, l.mask[n], box_index, col, row, l.w, l.h, netw, neth, l.w*l.h); dets[count].objectness = objectness; dets[count].classes = l.classes; for (j = 0; j < l.classes; ++j) { int class_index = entry_index(l, 0, n*l.w*l.h + i, 4 + 1 + j); float prob = objectness*predictions[class_index]; dets[count].prob[j] = (prob > thresh) ? 
prob : 0; } ++count; } } } correct_yolo_boxes(dets, count, w, h, netw, neth, relative, letter); return count; } // get prediction boxes: yolov2_forward_network.c void get_region_boxes_cpu(layer l, int w, int h, float thresh, float **probs, box *boxes, int only_objectness, int *map); void custom_get_region_detections(layer l, int w, int h, int net_w, int net_h, float thresh, int *map, float hier, int relative, detection *dets, int letter) { box *boxes = calloc(l.w*l.h*l.n, sizeof(box)); float **probs = calloc(l.w*l.h*l.n, sizeof(float *)); int i, j; for (j = 0; j < l.w*l.h*l.n; ++j) probs[j] = calloc(l.classes, sizeof(float *)); get_region_boxes_cpu(l, 1, 1, thresh, probs, boxes, 0, map); for (j = 0; j < l.w*l.h*l.n; ++j) { dets[j].classes = l.classes; dets[j].bbox = boxes[j]; dets[j].objectness = 1; for (i = 0; i < l.classes; ++i) { dets[j].prob[i] = probs[j][i]; } } free(boxes); free_ptrs((void **)probs, l.w*l.h*l.n); //correct_region_boxes(dets, l.w*l.h*l.n, w, h, net_w, net_h, relative); correct_yolo_boxes(dets, l.w*l.h*l.n, w, h, net_w, net_h, relative, letter); } void fill_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, detection *dets, int letter) { int j; for (j = 0; j < net->n; ++j) { layer l = net->layers[j]; if (l.type == YOLO) { int count = get_yolo_detections(l, w, h, net->w, net->h, thresh, map, relative, dets, letter); dets += count; } if (l.type == REGION) { custom_get_region_detections(l, w, h, net->w, net->h, thresh, map, hier, relative, dets, letter); //get_region_detections(l, w, h, net->w, net->h, thresh, map, hier, relative, dets); dets += l.w*l.h*l.n; } } } detection *get_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, int *num, int letter) { detection *dets = make_network_boxes(net, thresh, num); fill_network_boxes(net, w, h, thresh, hier, map, relative, dets, letter); return dets; } void *load_thread(void *ptr) { load_args a = *(struct load_args*)ptr; if (a.type == IMAGE_DATA) { *(a.im) = load_image(a.path, 0, 0, 3); *(a.resized) = resize_image(*(a.im), a.w, a.h); //printf(" a.path = %s, a.w = %d, a.h = %d \n", a.path, a.w, a.h); } else if (a.type == LETTERBOX_DATA) { printf(" LETTERBOX_DATA isn't implemented \n"); getchar(); //*(a.im) = load_image(a.path, 0, 0, 0); //*(a.resized) = letterbox_image(*(a.im), a.w, a.h); } else { printf("unknown DATA type = %d \n", a.type); getchar(); } free(ptr); return 0; } pthread_t load_data_in_thread(load_args args) { pthread_t thread; struct load_args *ptr = calloc(1, sizeof(struct load_args)); *ptr = args; if (pthread_create(&thread, 0, load_thread, ptr)) error("Thread creation failed"); return thread; } box_label *read_boxes(char *filename, int *n) { box_label *boxes = calloc(1, sizeof(box_label)); FILE *file = fopen(filename, "r"); if (!file) { //file_error(filename); *n = 0; return boxes; } float x, y, h, w; int id; int count = 0; while (fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5) { boxes = realloc(boxes, (count + 1) * sizeof(box_label)); boxes[count].id = id; boxes[count].x = x; boxes[count].y = y; boxes[count].h = h; boxes[count].w = w; boxes[count].left = x - w / 2; boxes[count].right = x + w / 2; boxes[count].top = y - h / 2; boxes[count].bottom = y + h / 2; ++count; } fclose(file); *n = count; return boxes; } typedef struct { box b; float p; int class_id; int image_index; int truth_flag; int unique_truth_index; } box_prob; int detections_comparator(const void *pa, const void *pb) { box_prob a = *(box_prob *)pa; box_prob 
b = *(box_prob *)pb; float diff = a.p - b.p; if (diff < 0) return 1; else if (diff > 0) return -1; return 0; } int nms_comparator_v3(const void *pa, const void *pb) { detection a = *(detection *)pa; detection b = *(detection *)pb; float diff = 0; if (b.sort_class >= 0) { diff = a.prob[b.sort_class] - b.prob[b.sort_class]; } else { diff = a.objectness - b.objectness; } if (diff < 0) return 1; else if (diff > 0) return -1; return 0; } void do_nms_sort_v3(detection *dets, int total, int classes, float thresh) { int i, j, k; k = total - 1; for (i = 0; i <= k; ++i) { if (dets[i].objectness == 0) { detection swap = dets[i]; dets[i] = dets[k]; dets[k] = swap; --k; --i; } } total = k + 1; for (k = 0; k < classes; ++k) { for (i = 0; i < total; ++i) { dets[i].sort_class = k; } qsort(dets, total, sizeof(detection), nms_comparator_v3); for (i = 0; i < total; ++i) { //printf(" k = %d, \t i = %d \n", k, i); if (dets[i].prob[k] == 0) continue; box a = dets[i].bbox; for (j = i + 1; j < total; ++j) { box b = dets[j].bbox; if (box_iou(a, b) > thresh) { dets[j].prob[k] = 0; } } } } } void validate_detector_map(char *datacfg, char *cfgfile, char *weightfile, float thresh_calc_avg_iou, int quantized, int save_params, const float iou_thresh) { list *options = read_data_cfg(datacfg); char *valid_images = option_find_str(options, "valid", "dataset/target.txt"); char *difficult_valid_images = option_find_str(options, "difficult", NULL); char *name_list = option_find_str(options, "names", "yolohw.names"); char **names = get_labels(name_list); char *mapf = option_find_str(options, "map", 0); int *map = 0; if (mapf) map = read_map(mapf); network net = parse_network_cfg(cfgfile, 1, quantized); //parse_network_cfg_custom(cfgfile, 1); // set batch=1 if (weightfile) { load_weights_upto_cpu(&net, weightfile, net.n); } //set_batch_network(&net, 1); yolov2_fuse_conv_batchnorm(net); if (quantized) { do_quantization(net); if (save_params) { printf("\n Saving quantized parameters... 
\n\n"); save_quantized_model(net); } } srand(time(0)); list *plist = get_paths(valid_images); char **paths = (char **)list_to_array(plist); char **paths_dif = NULL; if (difficult_valid_images) { list *plist_dif = get_paths(difficult_valid_images); paths_dif = (char **)list_to_array(plist_dif); } layer l = net.layers[net.n - 1]; int classes = l.classes; int m = plist->size; int i = 0; int t; const float thresh = .005; float nms = .45; //const float iou_thresh = 0.5; int nthreads = 4; image *val = calloc(nthreads, sizeof(image)); image *val_resized = calloc(nthreads, sizeof(image)); image *buf = calloc(nthreads, sizeof(image)); image *buf_resized = calloc(nthreads, sizeof(image)); pthread_t *thr = calloc(nthreads, sizeof(pthread_t)); load_args args = { 0 }; args.w = net.w; args.h = net.h; args.type = IMAGE_DATA; //args.type = LETTERBOX_DATA; //const float thresh_calc_avg_iou = 0.24; float avg_iou = 0; int tp_for_thresh = 0; int fp_for_thresh = 0; box_prob *detections = calloc(1, sizeof(box_prob)); int detections_count = 0; int unique_truth_count = 0; int *truth_classes_count = calloc(classes, sizeof(int)); for (t = 0; t < nthreads; ++t) { args.path = paths[i + t]; args.im = &buf[t]; args.resized = &buf_resized[t]; thr[t] = load_data_in_thread(args); } time_t start = time(0); for (i = nthreads; i < m + nthreads; i += nthreads) { fprintf(stderr, "%d\n", i); for (t = 0; t < nthreads && i + t - nthreads < m; ++t) { pthread_join(thr[t], 0); val[t] = buf[t]; val_resized[t] = buf_resized[t]; } for (t = 0; t < nthreads && i + t < m; ++t) { args.path = paths[i + t]; args.im = &buf[t]; args.resized = &buf_resized[t]; thr[t] = load_data_in_thread(args); } for (t = 0; t < nthreads && i + t - nthreads < m; ++t) { #ifndef WIN32 //if (!quantized) usleep(50000); usleep(50000); // 0.005 sec #endif const int image_index = i + t - nthreads; char *path = paths[image_index]; //char *id = basecfg(path); float *X = val_resized[t].data; //network_predict(net, X); if (quantized) { network_predict_quantized(net, X); // quantized //nms = 0.2; } else { network_predict_cpu(net, X); } int nboxes = 0; int letterbox = (args.type == LETTERBOX_DATA); float hier_thresh = 0; detection *dets = get_network_boxes(&net, 1, 1, thresh, hier_thresh, 0, 0, &nboxes, letterbox); //detection *dets = get_network_boxes(&net, val[t].w, val[t].h, thresh, hier_thresh, 0, 1, &nboxes, letterbox); // for letterbox=1 if (nms) do_nms_sort_v3(dets, nboxes, l.classes, nms); char labelpath[4096]; find_replace(path, "images", "labels", labelpath); find_replace(labelpath, "JPEGImages", "labels", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".png", ".txt", labelpath); find_replace(labelpath, ".bmp", ".txt", labelpath); find_replace(labelpath, ".JPG", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); int num_labels = 0; box_label *truth = read_boxes(labelpath, &num_labels); //printf(" labelpath = %s \n", labelpath); int i, j; for (j = 0; j < num_labels; ++j) { truth_classes_count[truth[j].id]++; } // difficult box_label *truth_dif = NULL; int num_labels_dif = 0; if (paths_dif) { char *path_dif = paths_dif[image_index]; char labelpath_dif[4096]; find_replace(path_dif, "images", "labels", labelpath_dif); find_replace(labelpath_dif, "JPEGImages", "labels", labelpath_dif); find_replace(labelpath_dif, ".jpg", ".txt", labelpath_dif); find_replace(labelpath_dif, ".JPEG", ".txt", labelpath_dif); find_replace(labelpath_dif, ".png", ".txt", labelpath_dif); truth_dif = read_boxes(labelpath_dif, 
&num_labels_dif); } const int checkpoint_detections_count = detections_count; for (i = 0; i < nboxes; ++i) { int class_id; for (class_id = 0; class_id < classes; ++class_id) { float prob = dets[i].prob[class_id]; if (prob > 0) { detections_count++; detections = realloc(detections, detections_count * sizeof(box_prob)); detections[detections_count - 1].b = dets[i].bbox; detections[detections_count - 1].p = prob; detections[detections_count - 1].image_index = image_index; detections[detections_count - 1].class_id = class_id; detections[detections_count - 1].truth_flag = 0; detections[detections_count - 1].unique_truth_index = -1; int truth_index = -1; float max_iou = 0; for (j = 0; j < num_labels; ++j) { box t = { truth[j].x, truth[j].y, truth[j].w, truth[j].h }; //printf(" IoU = %f, prob = %f, class_id = %d, truth[j].id = %d \n", // box_iou(dets[i].bbox, t), prob, class_id, truth[j].id); float current_iou = box_iou(dets[i].bbox, t); if (current_iou > iou_thresh && class_id == truth[j].id) { if (current_iou > max_iou) { max_iou = current_iou; truth_index = unique_truth_count + j; } } } // best IoU if (truth_index > -1) { detections[detections_count - 1].truth_flag = 1; detections[detections_count - 1].unique_truth_index = truth_index; } else { // if object is difficult then remove detection for (j = 0; j < num_labels_dif; ++j) { box t = { truth_dif[j].x, truth_dif[j].y, truth_dif[j].w, truth_dif[j].h }; float current_iou = box_iou(dets[i].bbox, t); if (current_iou > iou_thresh && class_id == truth_dif[j].id) { --detections_count; break; } } } // calc avg IoU, true-positives, false-positives for required Threshold if (prob > thresh_calc_avg_iou) { int z, found = 0; for (z = checkpoint_detections_count; z < detections_count - 1; ++z) if (detections[z].unique_truth_index == truth_index) { found = 1; break; } if (truth_index > -1 && found == 0) { avg_iou += max_iou; ++tp_for_thresh; } else fp_for_thresh++; } } } } unique_truth_count += num_labels; free_detections(dets, nboxes); free_image(val[t]); free_image(val_resized[t]); } } if ((tp_for_thresh + fp_for_thresh) > 0) { avg_iou = avg_iou / (tp_for_thresh + fp_for_thresh); } // SORT(detections) qsort(detections, detections_count, sizeof(box_prob), detections_comparator); typedef struct { double precision; double recall; int tp, fp, fn; } pr_t; // for PR-curve pr_t **pr = calloc(classes, sizeof(pr_t*)); for (i = 0; i < classes; ++i) { pr[i] = calloc(detections_count, sizeof(pr_t)); } printf("detections_count = %d, unique_truth_count = %d \n", detections_count, unique_truth_count); int *truth_flags = calloc(unique_truth_count, sizeof(int)); int rank; for (rank = 0; rank < detections_count; ++rank) { if (rank % 100 == 0) printf(" rank = %d of ranks = %d \r", rank, detections_count); if (rank > 0) { int class_id; for (class_id = 0; class_id < classes; ++class_id) { pr[class_id][rank].tp = pr[class_id][rank - 1].tp; pr[class_id][rank].fp = pr[class_id][rank - 1].fp; } } box_prob d = detections[rank]; // if (detected && isn't detected before) if (d.truth_flag == 1) { if (truth_flags[d.unique_truth_index] == 0) { truth_flags[d.unique_truth_index] = 1; pr[d.class_id][rank].tp++; // true-positive } } else { pr[d.class_id][rank].fp++; // false-positive } for (i = 0; i < classes; ++i) { const int tp = pr[i][rank].tp; const int fp = pr[i][rank].fp; const int fn = truth_classes_count[i] - tp; // false-negative = objects - true-positive pr[i][rank].fn = fn; if ((tp + fp) > 0) pr[i][rank].precision = (double)tp / (double)(tp + fp); else pr[i][rank].precision = 
0; if ((tp + fn) > 0) pr[i][rank].recall = (double)tp / (double)(tp + fn); else pr[i][rank].recall = 0; } } free(truth_flags); double mean_average_precision = 0; int unincluded_classes = 0; // To prevent the AP of unincluded classes from spoiling the mAP for (i = 0; i < classes; ++i) { double avg_precision = 0; int point; if (truth_classes_count[i] == 0) unincluded_classes++; for (point = 0; point < 11; ++point) { double cur_recall = point * 0.1; double cur_precision = 0; for (rank = 0; rank < detections_count; ++rank) { if (pr[i][rank].recall >= cur_recall) { // > or >= if (pr[i][rank].precision > cur_precision) { cur_precision = pr[i][rank].precision; } } } //printf("class_id = %d, point = %d, cur_recall = %.4f, cur_precision = %.4f \n", i, point, cur_recall, cur_precision); avg_precision += cur_precision; } avg_precision = avg_precision / 11; printf("class_id = %d, name = %s, \t ap = %2.2f %% \n", i, names[i], avg_precision * 100); mean_average_precision += avg_precision; } const float cur_precision = (float)tp_for_thresh / ((float)tp_for_thresh + (float)fp_for_thresh); const float cur_recall = (float)tp_for_thresh / ((float)tp_for_thresh + (float)(unique_truth_count - tp_for_thresh)); const float f1_score = 2.F * cur_precision * cur_recall / (cur_precision + cur_recall); printf(" for thresh = %1.2f, precision = %1.2f, recall = %1.2f, F1-score = %1.2f \n", thresh_calc_avg_iou, cur_precision, cur_recall, f1_score); printf(" for thresh = %0.2f, TP = %d, FP = %d, FN = %d, average IoU = %2.2f %% \n", thresh_calc_avg_iou, tp_for_thresh, fp_for_thresh, unique_truth_count - tp_for_thresh, avg_iou * 100); mean_average_precision = mean_average_precision / (classes-unincluded_classes); if (iou_thresh == 0.5) { printf("\n mean average precision (mAP) = %f, or %2.2f %% \n", mean_average_precision, mean_average_precision * 100); } else { printf("\n average precision (AP) = %f, or %2.2f %% for IoU threshold = %f \n", mean_average_precision, mean_average_precision * 100, iou_thresh); } for (i = 0; i < classes; ++i) { free(pr[i]); } free(pr); free(detections); free(truth_classes_count); fprintf(stderr, "Total Detection Time: %f Seconds\n", (double)(time(0) - start)); //getchar(); }
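/* A minimal standalone sketch of the per-class AP step inside
   validate_detector_map() above: 11-point interpolated average precision.
   The arrays `precision` and `recall` are hypothetical stand-ins for one
   row of the pr[class_id][rank] table, which is already ordered by
   descending detection confidence; mAP is the mean of this value over all
   classes that have ground-truth objects. */
#include <stddef.h>

static double average_precision_11point(const double *precision,
                                        const double *recall, size_t ranks)
{
    double ap = 0;
    int point;
    for (point = 0; point < 11; ++point) {
        double cur_recall = point * 0.1;     /* checkpoints 0.0, 0.1, ..., 1.0 */
        double cur_precision = 0;
        size_t rank;
        /* max precision over all ranks whose recall reaches the checkpoint */
        for (rank = 0; rank < ranks; ++rank) {
            if (recall[rank] >= cur_recall && precision[rank] > cur_precision)
                cur_precision = precision[rank];
        }
        ap += cur_precision;
    }
    return ap / 11.;
}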
GB_unaryop__identity_int64_fp64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_int64_fp64 // op(A') function: GB_tran__identity_int64_fp64 // C type: int64_t // A type: double // cast: int64_t cij ; GB_CAST_SIGNED(cij,aij,64) // unaryop: cij = aij #define GB_ATYPE \ double #define GB_CTYPE \ int64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ int64_t z ; GB_CAST_SIGNED(z,x,64) ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_int64_fp64 ( int64_t *restrict Cx, const double *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_int64_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
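/* A minimal sketch of the apply kernel above with the macros expanded by
   hand, to show the generated pattern. GB_CAST_SIGNED is internal to
   GraphBLAS (it guards NaN/out-of-range values before casting); a plain C
   cast is substituted here only to keep the sketch self-contained. */
#include <stdint.h>

static void unop_identity_int64_fp64_sketch(int64_t *Cx, const double *Ax,
                                            int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        double aij = Ax [p] ;          /* GB_GETA                             */
        int64_t z = (int64_t) aij ;    /* GB_CASTING (GB_CAST_SIGNED in GB.h) */
        Cx [p] = z ;                   /* GB_OP: identity, cij = aij          */
    }
}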
GB_unop__identity_uint16_fc32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint16_fc32) // op(A') function: GB (_unop_tran__identity_uint16_fc32) // C type: uint16_t // A type: GxB_FC32_t // cast: uint16_t cij = GB_cast_to_uint16_t ((double) crealf (aij)) // unaryop: cij = aij #define GB_ATYPE \ GxB_FC32_t #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint16_t z = GB_cast_to_uint16_t ((double) crealf (aij)) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint16_t z = GB_cast_to_uint16_t ((double) crealf (aij)) ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_FC32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint16_fc32) ( uint16_t *Cx, // Cx and Ax may be aliased const GxB_FC32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC32_t aij = Ax [p] ; uint16_t z = GB_cast_to_uint16_t ((double) crealf (aij)) ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC32_t aij = Ax [p] ; uint16_t z = GB_cast_to_uint16_t ((double) crealf (aij)) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint16_fc32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
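/* A self-contained sketch of the FC32 -> uint16 typecast used above: only
   the real part of the complex entry survives the cast. GB_cast_to_uint16_t
   is GraphBLAS-internal; cast_to_uint16_sketch below is a hypothetical
   stand-in that assumes it zeroes NaN/negative inputs and saturates at
   UINT16_MAX. */
#include <complex.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t cast_to_uint16_sketch (double x)
{
    if (!(x > 0)) return 0 ;                          /* NaN, 0, negatives */
    if (x >= (double) UINT16_MAX) return UINT16_MAX ; /* saturate          */
    return (uint16_t) x ;                             /* truncate to zero  */
}

int main (void)
{
    float complex aij = 3.7f + 2.0f * I ;   /* imaginary part is dropped */
    printf ("%u\n", (unsigned) cast_to_uint16_sketch ((double) crealf (aij))) ;
    /* prints: 3 */
    return 0 ;
}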
GB_unaryop__lnot_int64_int64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_int64_int64 // op(A') function: GB_tran__lnot_int64_int64 // C type: int64_t // A type: int64_t // cast: int64_t cij = (int64_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ int64_t #define GB_CTYPE \ int64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, x) \ int64_t z = (int64_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_int64_int64 ( int64_t *restrict Cx, const int64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_int64_int64 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
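// ---------------------------------------------------------------------------
// Editor's note (illustrative, not part of the generated file): LNOT here is
// C-style logical negation, z = !(x != 0), so zero maps to 1 and any nonzero
// value (including negatives) maps to 0, with the result stored as int64_t.
// A tiny self-contained check of that behavior:
// ---------------------------------------------------------------------------
#include <stdint.h>
#include <assert.h>

static void lnot_int64_sketch_check (void)
{
    int64_t in  [3] = { 0, 7, -3 } ;
    int64_t out [3] ;
    for (int p = 0 ; p < 3 ; p++)
    {
        int64_t x = in [p] ;        // GB_CASTING: identity for int64 -> int64
        out [p] = !(x != 0) ;       // GB_OP
    }
    assert (out [0] == 1 && out [1] == 0 && out [2] == 0) ;
}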
ParallelCLBPacker.h
/** * @file ParallelCLBPacker.h * @author Tingyuan LIANG (tliang@connect.ust.hk) * @brief This header file contains the definitions of ParallelCLBPacker class and its internal modules and APIs which * finally packs LUT/FF/MUX/CARRY elements into legal CLB sites in a parallel approach. * @version 0.1 * @date 2021-10-02 * * @copyright Copyright (c) 2021 Reconfiguration Computing Systems Lab, The Hong Kong University of Science and * Technology. All rights reserved. * */ #ifndef _PARALLELCLBPACKER_ #define _PARALLELCLBPACKER_ #include "DesignInfo.h" #include "DeviceInfo.h" #include "KDTree/KDTree.h" #include "MaximalCardinalityMatching/MaximalCardinalityMatching.h" #include "PlacementInfo.h" #include "const.h" #include "dumpZip.h" #include "readZip.h" #include "strPrint.h" #include "stringCheck.h" #include <assert.h> #include <cmath> #include <fstream> #include <iostream> #include <map> #include <omp.h> #include <queue> #include <set> #include <sstream> #include <string> #include <vector> // implemented based on the paper's Algorithm 1: // W. Li and D. Z. Pan, "A New Paradigm for FPGA Placement Without Explicit Packing," // in IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems, vol. 38, no. 11, pp. 2113-2126, // Nov. 2019, doi: 10.1109/TCAD.2018.2877017. /** * @brief a utility struct for the comparison between PlacementInfo::PlacementUnit according to PU ID * * The default STL set for PlacementUnit pointers will lead to random iteration order of the PlacementUnits in a set and * lead to slight variation in the final packing result. * */ struct Packing_PUcompare { inline bool operator()(PlacementInfo::PlacementUnit *lhs, PlacementInfo::PlacementUnit *rhs) const { return lhs->getId() < rhs->getId(); } }; /** * @brief ParallelCLBPacker will finally pack LUT/FF/MUX/CARRY elements into legal CLB sites in a parallel approach. * * implemented based on the paper's Algorithm 1: * W. Li and D. Z. Pan, "A New Paradigm for FPGA Placement Without Explicit Packing," * in IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems, vol. 38, no. 11, pp. 2113-2126, * Nov. 2019, doi: 10.1109/TCAD.2018.2877017. * * We also provide many detailed optimization techniques according to our observation, macro constraints, timing * demands, and the application characteristics to improve the packing efficiency and quality. 
* */ class ParallelCLBPacker { public: /** * @brief Construct a new Parallel CLB Packer object * * @param designInfo given design information * @param deviceInfo given device information * @param placementInfo the PlacementInfo for this placer to handle * @param JSONCfg the user-defined placement configuration * @param unchangedIterationThr specify how many iterations a PlacementUnit should stay at the top priority of a * site before we finally map it to the site * @param numNeighbor the threshold number of cells for site * @param deltaD the increase step of the neighbor search diameter * @param curD current neighbor search diameter * @param maxD the maximum constraint of the neighbor search diameter * @param PQSize the size of priority queue (the low-priority candidates will be removed) * @param HPWLWeight the factor of HPWL overhead in packing evaluation for a cell * @param packerName the name of this packer */ ParallelCLBPacker(DesignInfo *designInfo, DeviceInfo *deviceInfo, PlacementInfo *placementInfo, std::map<std::string, std::string> &JSONCfg, int unchangedIterationThr, int numNeighbor, float deltaD, float curD, float maxD, int PQSize, float HPWLWeight, std::string packerName); ~ParallelCLBPacker() { for (auto packingSite : packingSites) delete packingSite; } /** * @brief PackedControlSet stores the data of a combination of FFs within one control set (clock * enable/preset-reset/clock) that can be packed in a site. * */ class PackedControlSet { public: PackedControlSet() { FFs.clear(); }; /** * @brief Construct a new Packed Control Set object by cloning another one * * @param anotherControlSet */ PackedControlSet(const PackedControlSet &anotherControlSet) { FFs.clear(); assert((anotherControlSet.getSize() > 0 || anotherControlSet.getCSId() < 0) && "the other one control set should not be empty."); CSId = anotherControlSet.getCSId(); FFs = anotherControlSet.getFFs(); if (CSId >= 0) { CLK = anotherControlSet.getCLK(); SR = anotherControlSet.getSR(); CE = anotherControlSet.getCE(); FFType = anotherControlSet.getFFType(); } else { CLK = nullptr; SR = nullptr; CE = nullptr; } }; /** * @brief update this Packed Control Set object by cloning another one * * @param anotherControlSet * @return PackedControlSet& */ PackedControlSet &operator=(const PackedControlSet &anotherControlSet) { FFs.clear(); assert((anotherControlSet.getSize() > 0 || anotherControlSet.getCSId() < 0) && "the other one control set should not be empty."); CSId = anotherControlSet.getCSId(); FFs = anotherControlSet.getFFs(); if (CSId >= 0) { CLK = anotherControlSet.getCLK(); SR = anotherControlSet.getSR(); CE = anotherControlSet.getCE(); FFType = anotherControlSet.getFFType(); } else { CLK = nullptr; SR = nullptr; CE = nullptr; } return *this; }; ~PackedControlSet(){}; /** * @brief Get the number of FFs in this control set * * @return unsigned int */ inline unsigned int getSize() const { return FFs.size(); } /** * @brief get the FFs in this PackedControlSet * * @return const std::vector<DesignInfo::DesignCell *>& */ inline const std::vector<DesignInfo::DesignCell *> &getFFs() const { return FFs; } /** * @brief clear the control set information in this PackedControlSet (only when there is no FF in this set) * */ inline void reset() { assert(FFs.size() == 0); CSId = -1; CLK = nullptr; SR = nullptr; CE = nullptr; } /** * @brief add an FF into this PackedControlSet and check the compatibility * * @param curFF a given FF cell */ inline void addFF(DesignInfo::DesignCell *curFF) { if (CSId < 0) { if
(!curFF->isVirtualCell()) { assert(curFF->getControlSetInfo()); CSId = curFF->getControlSetInfo()->getId(); CLK = curFF->getControlSetInfo()->getCLK(); SR = curFF->getControlSetInfo()->getSR(); CE = curFF->getControlSetInfo()->getCE(); FFType = curFF->getOriCellType(); } } else { if (!curFF->isVirtualCell()) { assert(curFF->getControlSetInfo()->getId() == CSId); } } FFs.push_back(curFF); } /** * @brief remove the specified i-th FF from this PackedControlSet * * @param i a specified index of the FF to be removed */ inline void removeXthFF(int i) { FFs.erase(FFs.begin() + i); } /** * @brief find the index in the list for a given FF cell pointer * * @param curFF a given FF cell * @return int */ inline int findFF(DesignInfo::DesignCell *curFF) { for (unsigned int i = 0; i < FFs.size(); i++) { if (FFs[i] == curFF) return i; } return -1; } /** * @brief get the control set id of this PackedControlSet. * * The control set determines whether two FFs can be packed. * * @return int */ inline int getCSId() const { return CSId; } /** * @brief set the control set id of this PackedControlSet. * * The control set determines whether two FFs can be packed. * * @param _CSId the id of the target control set */ inline void setCSId(int _CSId) { CSId = _CSId; } inline DesignInfo::DesignNet *getCLK() const { assert(CSId >= 0); return CLK; } inline DesignInfo::DesignNet *getSR() const { assert(CSId >= 0); return SR; } inline DesignInfo::DesignNet *getCE() const { assert(CSId >= 0); return CE; } inline DesignInfo::DesignCellType getFFType() const { assert(CSId >= 0); return FFType; } /** * @brief check whether this PackedControlSet is compatible with a given control set ID * * @param inputCSId the id of the target control set * @return true * @return false */ inline bool compatibleWith(int inputCSId) { if (CSId == -1) return true; return inputCSId == CSId; } private: int CSId = -1; DesignInfo::DesignNet *CLK = nullptr; DesignInfo::DesignNet *SR = nullptr; DesignInfo::DesignNet *CE = nullptr; DesignInfo::DesignCellType FFType; std::vector<DesignInfo::DesignCell *> FFs; }; /** * @brief PackingCLBSite is a container for the packing information (parameters, candidates and packing status) of a * specific DeviceInfo::DeviceSite * */ class PackingCLBSite { public: /** * @brief Construct a new Packing CLB Site object * * @param placementInfo the PlacementInfo for this placer to handle * @param CLBSite * @param unchangedIterationThr specify how many iterations a PlacementUnit should stay at the top priority of a * site before we finally map it to the site * @param numNeighbor the threshold number of cells for site * @param deltaD the increase step of the neighbor search diameter * @param curD current neighbor search diameter * @param maxD the maximum constraint of the neighbor search diameter * @param PQSize the size of priority queue (the low-priority candidates will be removed) * @param y2xRatio a factor to tune the weights of the net spanning in Y-coordinate relative to the net spanning * in X-coordinate * @param HPWLWeight the factor of HPWL overhead in packing evaluation for a cell * @param PUId2PackingCLBSite the reference of a map (actually a vector) recording the mapping of PlacementUnits * to the PackingCLBSites */ PackingCLBSite(PlacementInfo *placementInfo, DeviceInfo::DeviceSite *CLBSite, int unchangedIterationThr, int numNeighbor, float deltaD, float curD, float maxD, unsigned int PQSize, float y2xRatio, float HPWLWeight, std::vector<PackingCLBSite *> &PUId2PackingCLBSite) : placementInfo(placementInfo),
CLBSite(CLBSite), unchangedIterationThr(unchangedIterationThr), numNeighbor(numNeighbor), deltaD(deltaD), curD(curD), maxD(maxD), PQSize(PQSize), y2xRatio(y2xRatio), HPWLWeight(HPWLWeight), PUId2PackingCLBSite(PUId2PackingCLBSite), determinedClusterInSite(nullptr) { neighborPUs.clear(); seedClusters.clear(); priorityQueue.clear(); PU2TopCnt.clear(); // PU2HPWLChange.clear(); } ~PackingCLBSite() { if (determinedClusterInSite) { delete determinedClusterInSite; } for (auto tmpCluster : priorityQueue) { delete tmpCluster; } } /** * @brief PackingCLBCluster is a container of cells/PlacementUnits which can be packed in the corresponding CLB * site * */ class PackingCLBCluster { public: /** * @brief Construct a new Packing CLB Cluster object (it should not be called.) * */ PackingCLBCluster() { assert(false && "PackingCLBCluster should not initialize without parameters \"parentPackingCLB\". This " "problem might be caused by resizing vector to a longer one"); } PackingCLBCluster(PackingCLBSite *parentPackingCLB) : parentPackingCLB(parentPackingCLB) { placementInfo = parentPackingCLB->getPlacementInfo(); id = random(); PUs.clear(); FFControlSets.clear(); FFControlSets.resize(4); singleLUTs.clear(); pairedLUTs.clear(); // nets.clear(); } ~PackingCLBCluster(){}; PackingCLBCluster(PackingCLBCluster *anotherPackingCLBCluster) { id = anotherPackingCLBCluster->getId(); FFControlSets = anotherPackingCLBCluster->getFFControlSets(); singleLUTs = anotherPackingCLBCluster->getSingleLUTs(); pairedLUTs = anotherPackingCLBCluster->getPairedLUTs(); PUs = anotherPackingCLBCluster->getPUs(); scoreInSite = anotherPackingCLBCluster->getScoreInSite(); parentPackingCLB = anotherPackingCLBCluster->getParentPackingCLB(); placementInfo = parentPackingCLB->getPlacementInfo(); // net2ConnectivityScore = anotherPackingCLBCluster->getNet2ConnectivityScore(); HPWLChange = anotherPackingCLBCluster->getHPWLChange(); totalConnectivityScore = anotherPackingCLBCluster->getTotalConnectivityScore(); totalCellNum = anotherPackingCLBCluster->getTotalCellNum(); totalLen = anotherPackingCLBCluster->getTotalLen(); numMuxes = anotherPackingCLBCluster->getNumMuxes(); } /** * @brief Get the Id of the PackingCLBCluster (just for debug information; don't use it in the algorithm) * * @return int */ inline int getId() const { return id; } /** * @brief refresh the Id of the PackingCLBCluster so we can know it is changed. * */ inline void refreshId() { id = random(); } /** * @brief check how many input pins will be needed if the two LUTs are packed. * * @param LUTA * @param LUTB * @return unsigned int */ inline unsigned int getPairPinNum(DesignInfo::DesignCell *LUTA, DesignInfo::DesignCell *LUTB) { if (LUTA->getInputPins().size() == 6 || LUTB->getInputPins().size() == 6 || LUTA->isLUT6() || LUTB->isLUT6()) return 12; int pinNumA = 0; int totalPin = 0; int netIds[5]; // be aware that a LUT might have pins connected to the same net and they should be // treated as different inputs.
for (auto tmpPin : LUTA->getInputPins()) { if (!tmpPin->isUnconnected()) { netIds[pinNumA] = tmpPin->getNet()->getElementIdInType(); pinNumA++; } } totalPin = pinNumA; for (auto tmpPin : LUTB->getInputPins()) { if (!tmpPin->isUnconnected()) { bool matched = false; for (int i = 0; i < pinNumA; i++) { if (netIds[i] >= 0 && netIds[i] == tmpPin->getNet()->getElementIdInType()) { netIds[i] = -1; matched = true; break; } } if (!matched) { totalPin++; } } } return totalPin; } /** * @brief conduct the maximum cardinality matching algorithm to pair LUTs * * @param verbose */ void maxCardinalityMatching(bool verbose = false); /** * @brief Get the number of pins within this site for a given net (the more pins of a net are located in one site, * the lower the demand for routing resources) * * @param curNet a given net * @return int */ int getInternalPinsNum(PlacementInfo::PlacementNet *curNet); /** * @brief check whether a control set can be placed in a given half CLB * * Since, in the current architecture, FFs are packed in half CLBs in the site, the control sets of half CLB * pairs should be compatible. * * @param CSPtr a given control set pointer * @param anotherHalfCLB the other one half CLB id in this half CLB pair * @return true if the control set can be placed in the given half CLB * @return false if the control set CANNOT be placed in the given half CLB */ bool compatibleInOneHalfCLB(DesignInfo::ControlSetInfo *CSPtr, int anotherHalfCLB); /** * @brief try to add a given LUT into this cluster * * @param curLUT a given LUT * @return true if this attempt is successful * @return false if this attempt FAILED. */ bool addLUT(DesignInfo::DesignCell *curLUT); /** * @brief try to add a given FF into a specific half CLB in this cluster * * @param curFF a given FF * @param halfCLB the target half CLB id in this cluster * @return true if this attempt is successful * @return false if this attempt FAILED. */ bool addToFFSet(DesignInfo::DesignCell *curFF, int halfCLB); /** * @brief try to add a given list of FFs into a specific half CLB in this cluster * * @param curFFs a given list of FFs * @param halfCLB the target half CLB id in this cluster * @return true if this attempt is successful * @return false if this attempt FAILED. */ bool addToFFSet(std::vector<DesignInfo::DesignCell *> curFFs, int halfCLB); /** * @brief try to add a given FF into this cluster * * @param curFF a given FF * @param enforceHalfCLB (default -1/no limit) limit the candidate half CLB id for this FF * @param enforceMainFFSlot (default false) constrain that the FF can only be placed in the main half CLB * (the one connected to LUT6 output pins in the CLB site). There are two types of half CLBs, we call those * connected to LUT6 output pins "main half CLB slots" and those connected to LUT5 output pins "secondary * half CLB slots". Some FFs (especially the virtual ones) should be mapped to the "main" ones. * @return true if this attempt is successful * @return false if this attempt FAILED. */ bool addFF(DesignInfo::DesignCell *curFF, int enforceHalfCLB = -1, bool enforceMainFFSlot = false); /** * @brief try to add a given list of FFs into this cluster * * @param curFFs a given list of FFs * @param enforceHalfCLB (default -1/no limit) limit the candidate half CLB id for this FF * @param enforceMainFFSlot (default false) constrain that the FF can only be placed in the main half CLB * (the one connected to LUT6 output pins in the CLB site).
There are two types of half CLBs, we call those * connected to LUT6 output pins "main half CLB slots" and those connected to LUT5 output pins "secondary * half CLB slots". Some FFs (especially the virtual ones) should be mapped to the "main" ones. * @param isMuxMacro * @return true if this attempt is successful * @return false if this attempt FAILED. */ bool addFFGroup(std::vector<DesignInfo::DesignCell *> curFFs, int enforceHalfCLB, bool enforceMainFFSlot, bool isMuxMacro); /** * @brief remove a specific LUT from this candidate cluster * * @param curLUT a given LUT */ void removeLUT(DesignInfo::DesignCell *curLUT) { if (singleLUTs.find(curLUT) != singleLUTs.end()) { singleLUTs.erase(curLUT); return; } for (auto LUTPair : pairedLUTs) { if (LUTPair.first == curLUT) { singleLUTs.insert(LUTPair.second); pairedLUTs.erase(LUTPair); return; } else if (LUTPair.second == curLUT) { singleLUTs.insert(LUTPair.first); pairedLUTs.erase(LUTPair); return; } } assert(false && "should be erased successfully"); } /** * @brief remove a specific FF from this candidate cluster * * @param curFF a given FF */ void removeFF(DesignInfo::DesignCell *curFF) { unsigned int i = 0; for (i = 0; i < FFControlSets.size(); i++) { int findFFLoc = FFControlSets[i].findFF(curFF); if (findFFLoc >= 0) { FFControlSets[i].removeXthFF(findFFLoc); if (FFControlSets[i].getSize() == 0) { FFControlSets[i].reset(); } return; } } assert(false && "should not reach here"); } /** * @brief remove some PlacementInfo::PlacementUnit from the cluster for later determined cluster * construction of this site * * @param tmpPU a given PlacementInfo::PlacementUnit to be removed */ void removePUToConstructDetCluster(PlacementInfo::PlacementUnit *tmpPU) { assert(PUs.find(tmpPU) != PUs.end()); std::vector<DesignInfo::DesignCell *> cellsToRemove(0); if (auto unpackCell = dynamic_cast<PlacementInfo::PlacementUnpackedCell *>(tmpPU)) { cellsToRemove.push_back(unpackCell->getCell()); } else if (auto curMacro = dynamic_cast<PlacementInfo::PlacementMacro *>(tmpPU)) { for (auto tmpCell : curMacro->getCells()) cellsToRemove.push_back(tmpCell); } // assert(checkCellCorrectness(tmpPU, false)); for (auto curCell : cellsToRemove) { if (curCell->isLUT()) { removeLUT(curCell); } else if (curCell->isFF()) { removeFF(curCell); } else { assert(curCell->isMux()); // assert(false && "unexpected type."); } } PUs.erase(tmpPU); if (auto tmpMacro = dynamic_cast<PlacementInfo::PlacementMacro *>(tmpPU)) { if (tmpMacro->getMacroType() == PlacementInfo::PlacementMacro::PlacementMacroType_MUX7 || tmpMacro->getMacroType() == PlacementInfo::PlacementMacro::PlacementMacroType_MUX8) { numMuxes--; } } // assert(checkCellCorrectness(tmpPU, false)); hashed = false; } /** * @brief check whether the type of the given PlacementUnit is compatible with the site type * * @param tmpPU a given PlacementUnit * @return true the PU is compatible with the site * @return false the PU is NOT compatible with the site */ inline bool isPUTypeCompatibleWithSiteType(PlacementInfo::PlacementUnit *tmpPU) { if (auto tmpMacro = dynamic_cast<PlacementInfo::PlacementMacro *>(tmpPU)) { if (tmpMacro->getMacroType() == PlacementInfo::PlacementMacro::PlacementMacroType_LCLB) { if (parentPackingCLB->getCLBSite()->getSiteType() == "SLICEM") return false; } } return true; } /** * @brief try to add a given PlacementUnit into this cluster * * the addPU implementation is based on the following paper's Algorithm 1: * G.
Chen et al., “RippleFPGA: Routability-Driven Simultaneous Packing and Placement for Modern FPGAs,” * IEEE Trans. Comput.-Aided Des. Integr. Circuits Syst., vol. 37, no. 10, pp. 2022–2035, * Oct. 2018, doi: 10.1109/TCAD.2017.2778058. * * @param tmpPU a given PlacementUnit * @param allowOverlap whether the attempt is still considered successful when it causes resource overlap * @return true if this attempt is successful * @return false if this attempt FAILED. */ bool addPU(PlacementInfo::PlacementUnit *tmpPU, bool allowOverlap = false); /** * @brief without modifying the original cluster container, try to add a given PlacementUnit into this * cluster * * @param tmpPU a given PlacementUnit * @return true if this attempt is successful * @return false if this attempt FAILED. */ inline bool tryAddPU(PlacementInfo::PlacementUnit *tmpPU) { PackingCLBCluster *fakeCluster = new PackingCLBCluster(this); if (fakeCluster->addPU(tmpPU, false)) { FFControlSets = fakeCluster->getFFControlSets(); singleLUTs = fakeCluster->getSingleLUTs(); pairedLUTs = fakeCluster->getPairedLUTs(); PUs.insert(tmpPU); delete fakeCluster; return true; } else { delete fakeCluster; return false; } } /** * @brief find/print the reason why the PlacementUnit fails to be added into this cluster * * @param tmpPU a given PlacementUnit */ void addPUFailReason(PlacementInfo::PlacementUnit *tmpPU); /** * @brief check whether the cluster contains a specific PlacementUnit * * @param tmpPU a given PlacementUnit * @return true if the cluster contains the given PlacementUnit * @return false otherwise */ inline bool contains(PlacementInfo::PlacementUnit *tmpPU) { return PUs.find(tmpPU) != PUs.end(); } inline const std::set<PlacementInfo::PlacementUnit *, Packing_PUcompare> &getPUs() const { return PUs; } inline const std::vector<PackedControlSet> &getFFControlSets() const { return FFControlSets; } /** * @brief Get the set of single LUTs in this cluster (some other LUTs have been paired for packing) * * @return const std::set<DesignInfo::DesignCell *>& */ inline const std::set<DesignInfo::DesignCell *> &getSingleLUTs() const { return singleLUTs; } /** * @brief remove a single LUT from the set of single LUTs in this cluster * * @param tmpLUT a given single LUT */ inline void removeSingleLUT(DesignInfo::DesignCell *tmpLUT) { assert(singleLUTs.find(tmpLUT) != singleLUTs.end()); singleLUTs.erase(tmpLUT); } /** * @brief try to remove a single LUT from the single LUT set * * @param tmpLUT a given LUT * @return true if the given LUT is found in the cluster set and can be removed * @return false if the given LUT is NOT found in the cluster set */ inline bool tryRemoveSingleLUT(DesignInfo::DesignCell *tmpLUT) { if (singleLUTs.find(tmpLUT) != singleLUTs.end()) { singleLUTs.erase(tmpLUT); return true; } else { return false; } } /** * @brief try to remove a LUT from the set of paired LUTs * * @param tmpLUT a given LUT * @return true if the given LUT is found in the set of paired LUTs and can be removed * @return false if the given LUT is NOT found in the set of paired LUTs */ inline bool tryRemoveSingleLUTFromPairs(DesignInfo::DesignCell *tmpLUT) { for (auto pair : pairedLUTs) { if (tmpLUT == pair.first) { singleLUTs.insert(pair.second); pairedLUTs.erase(pair); return true; } if (tmpLUT == pair.second) { singleLUTs.insert(pair.first); pairedLUTs.erase(pair); return true; } } return false; } /** * @brief try to remove a pair of LUTs from the set of paired LUTs * * @param tmpLUTA a LUT in the pair * @param tmpLUTB another LUT in the pair * @return true if the given LUTs are found in the set of paired LUTs
and can be removed * @return false if the given LUTs are NOT found in the set of paired LUTs */ inline bool tryRemoveLUTPairFromPairs(DesignInfo::DesignCell *tmpLUTA, DesignInfo::DesignCell *tmpLUTB) { for (auto pair : pairedLUTs) { if (tmpLUTA == pair.first) { assert(pair.second == tmpLUTB); pairedLUTs.erase(pair); return true; } if (tmpLUTA == pair.second) { assert(pair.first == tmpLUTB); pairedLUTs.erase(pair); return true; } } return false; } /** * @brief Get the set of the paired LUTs * * @return const std::set<std::pair<DesignInfo::DesignCell *, DesignInfo::DesignCell *>>& */ inline const std::set<std::pair<DesignInfo::DesignCell *, DesignInfo::DesignCell *>> &getPairedLUTs() const { return pairedLUTs; } /** * @brief check whether all the PlacementUnit in this cluster are valid (not bound to other sites) for this * site * * @param PUId2PackingCLBSite the mapping from PUs to sites * @param parentPackingCLBSite the site for this cluster * @return true if all the PUs can be mapped to this site * @return false if some of the PUs CANNOT be mapped to this site */ inline bool areAllPUsValidForThisSite(const std::vector<PackingCLBSite *> &PUId2PackingCLBSite, PackingCLBSite *parentPackingCLBSite) { for (auto tmpPU : PUs) { if (PUId2PackingCLBSite[tmpPU->getId()]) { if (PUId2PackingCLBSite[tmpPU->getId()] != parentPackingCLBSite) { return false; } if (!placementInfo->checkClockColumnLegalization(tmpPU, parentPackingCLB->getCLBSite())) { return false; } } } return true; } /** * @brief Get the score if this cluster is mapped the site * * @return float */ inline float getScoreInSite() const { return scoreInSite; } /** * @brief update the score of this cluster by considering HPWL, interconnection density, timing and etc * */ void updateScoreInSite(); /** * @brief incrementally update the score of this cluster by considering that only a given PlacementUnit will * be added into this cluster * * @param tmpPU a given PlacementUnit */ void incrementalUpdateScoreInSite(PlacementInfo::PlacementUnit *tmpPU); /** * @brief Get the Parent Packing CLB site of this cluster (this can be used to get more device information) * * @return PackingCLBSite* */ inline PackingCLBSite *getParentPackingCLB() const { return parentPackingCLB; } /** * @brief we use a hash function to encode the cluster to easily check duplicated clusters in the candidates * */ void clusterHash() { hashId = PUs.size(); if (parentPackingCLB->getCarryMacro()) { hashId += HiFPlacer_hashprimes[(unsigned char)(~(parentPackingCLB->getCarrySiteOffset()) & 0xff)]; hashId %= 10001777; hashId += HiFPlacer_hashprimes[(unsigned char)(~(parentPackingCLB->getCarryMacro()->getId()) & 0xff)]; hashId %= 10001777; } if (parentPackingCLB->getLUTRAMMacro()) { hashId += HiFPlacer_hashprimes[(unsigned char)(~(parentPackingCLB->getLUTRAMMacro()->getId()) & 0xff)]; hashId %= 10001777; } for (auto tmpPU : PUs) { if (auto unpackCell = dynamic_cast<PlacementInfo::PlacementUnpackedCell *>(tmpPU)) { hashId += HiFPlacer_hashprimes[(unsigned char)(~(unpackCell->getCell()->getCellId()) & 0xff)] * unpackCell->getCell()->getCellId(); hashId %= 10001777; } else if (auto curMacro = dynamic_cast<PlacementInfo::PlacementMacro *>(tmpPU)) { for (auto tmpCell : curMacro->getCells()) { hashId += HiFPlacer_hashprimes[(unsigned char)(~(tmpCell->getCellId()) & 0xff)] * tmpCell->getCellId(); hashId %= 10001777; } } } hashed = true; } /** * @brief incrementally update the hash function with an additional PlacementUnit. 
This hash will be used to * encode the cluster to easily check duplicated clusters in the candidates * * @param tmpPU * @return int */ inline int clusterHashWithAdditionalPU(PlacementInfo::PlacementUnit *tmpPU) { int clusterHashId = getHash(); if (auto unpackCell = dynamic_cast<PlacementInfo::PlacementUnpackedCell *>(tmpPU)) { clusterHashId += HiFPlacer_hashprimes[(unsigned char)(~(unpackCell->getCell()->getCellId()) & 0xff)] * unpackCell->getCell()->getCellId(); clusterHashId %= 10001777; } else if (auto curMacro = dynamic_cast<PlacementInfo::PlacementMacro *>(tmpPU)) { for (auto tmpCell : curMacro->getCells()) { clusterHashId += HiFPlacer_hashprimes[(unsigned char)(~(tmpCell->getCellId()) & 0xff)] * tmpCell->getCellId(); clusterHashId %= 10001777; } } return clusterHashId; } /** * @brief Get the hash code for this cluster * * @return int */ inline int getHash() { if (!hashed) clusterHash(); return hashId; } /** * @brief Get the hash code of this cluster without changing the class variables of this cluster * * @return int */ inline int getHashConst() const { int hashId = 0; std::vector<DesignInfo::DesignCell *> cellsToCheck(0); for (auto tmpPU : PUs) { if (auto unpackCell = dynamic_cast<PlacementInfo::PlacementUnpackedCell *>(tmpPU)) { hashId += 28901 * unpackCell->getCell()->getCellId(); hashId %= 10001777; } else if (auto curMacro = dynamic_cast<PlacementInfo::PlacementMacro *>(tmpPU)) { for (auto tmpCell : curMacro->getCells()) { hashId += 28901 * tmpCell->getCellId(); hashId %= 10001777; } } } return hashId; } /** * @brief get the HPWL change if a given PlacementUnit is moved to this site * * @param tmpPU a given PlacementUnit * @return float */ inline float getHPWLChangeForPU(PlacementInfo::PlacementUnit *tmpPU) { return parentPackingCLB->getHPWLChangeForPU(tmpPU); } /** * @brief Get the connectivity term in the cluster score object * * @return float */ inline float getTotalConnectivityScore() const { return totalConnectivityScore; } /** * @brief Get the HWPL change term in the cluster score object * * @return float */ inline float getHPWLChange() const { return HPWLChange; } /** * @brief Get the total number of cells in this cluster * * @return int */ inline int getTotalCellNum() const { return totalCellNum; } /** * @brief Get the total number of MUX cells in this cluster * * @return int */ inline int getNumMuxes() const { return numMuxes; } /** * @brief Get the maximum length of the paths which involve this cluster * * @return int */ inline int getTotalLen() const { return totalLen; } /** * @brief Get the total weights of cells in the cluster (each cell will have different weight in the * placement) * * @return float */ inline float getTotalCellWeight() const { float totalCellWeight = 0; for (auto curCell : singleLUTs) { totalCellWeight += parentPackingCLB->getPlacementInfo()->getActualOccupationByCellId(curCell->getCellId()); } for (auto pair : pairedLUTs) { totalCellWeight += parentPackingCLB->getPlacementInfo()->getActualOccupationByCellId(pair.first->getCellId()); totalCellWeight += parentPackingCLB->getPlacementInfo()->getActualOccupationByCellId(pair.second->getCellId()); } for (auto &CS : FFControlSets) { for (auto curCell : CS.getFFs()) { totalCellWeight += parentPackingCLB->getPlacementInfo()->getActualOccupationByCellId(curCell->getCellId()); } } return totalCellWeight; } /** * @brief check whether the cluster contains a specific FF cell * * @param curFF a given FF cell * @return true if the cluster contains a specific FF cell * @return false if the cluster DOES NOT 
contain a specific FF cell */ inline bool containFF(DesignInfo::DesignCell *curFF) { for (unsigned int i = 0; i < FFControlSets.size(); i++) { if (FFControlSets[i].findFF(curFF) >= 0) return true; } return false; } /** * @brief an API to print the information of the cluster * */ void printMyself(); /** * @brief a verification function to check whether the cells in this cluster are REALLY LEGAL. * * @param tmpPU * @param isAddPU * @return true * @return false */ bool checkCellCorrectness(PlacementInfo::PlacementUnit *tmpPU, bool isAddPU); /** * @brief check whether a specific number of Muxes can be compatible with a specific FFset for packing * * Since Mux will use some register input wires for the select signal, we have to check whether the packing * is possible for this situation * * @param i the id for the FF set * @param addNum the number of Muxes we want to add into the CLB * @return true if the given number of Muxes can be added. * @return false if the given number of Muxes CANNOT be added. */ bool checkNumMuxCompatibleInFFSet(int i, int addNum); /** * @brief Get the max length of paths involving a given PlacementUnit * * (currently unused) during packing, we should consider timing factors and the critical path should be * assigned top priority. * * @param curPU a given PlacementUnit * @return int */ inline int getPlacementUnitMaxPathLen(PlacementInfo::PlacementUnit *curPU) { auto &timingNodes = parentPackingCLB->getPlacementInfo()->getTimingInfo()->getSimplePlacementTimingInfo(); if (auto unpacked = dynamic_cast<PlacementInfo::PlacementUnpackedCell *>(curPU)) { if (unpacked->getCell()->isVirtualCell()) return 0; return timingNodes[unpacked->getCell()->getCellId()]->getLongestPathLength(); } else if (auto tmpMacro = dynamic_cast<PlacementInfo::PlacementMacro *>(curPU)) { int maxLen = 0; for (auto tmpCell : tmpMacro->getCells()) { if (tmpCell->isVirtualCell()) continue; int len = timingNodes[tmpCell->getCellId()]->getLongestPathLength(); if (len > maxLen) maxLen = len; } return maxLen; } return 0; } private: const unsigned int MaxNum_ControlSet = 4; const unsigned int MaxNum_FFinControlSet = 4; const unsigned int MaxNum_LUTSite = 8; int numMuxes = 0; /** * @brief the parent CLB site for this cluster * */ PackingCLBSite *parentPackingCLB = nullptr; /** * @brief the PlacementInfo for this cluster * */ PlacementInfo *placementInfo = nullptr; /** * @brief the evaluation score of packing for this cluster * */ float scoreInSite = -100000000; /** * @brief a hash id to record the elements in this cluster * * Some clusters have the same elements (hashid) but they might have different index due to the combination * (packing) order. We can use this hash id to remove duplicate candidates. * */ int hashId = -3654; bool hashed = false; /** * @brief the unique id for each cluster * */ int id = -1; std::set<PlacementInfo::PlacementUnit *, Packing_PUcompare> PUs; /** * @brief the connectivity score for this cluster * * We want more nets to become internal nets inside the CLB sites so the number of nets between sites can * be reduced. * */ float totalConnectivityScore = 0; /** * @brief the HPWL term for the wirelength optimization * * We want the packing not to significantly increase the HPWL * */ float HPWLChange = 0; /** * @brief the cell number term in the cluster score * * We want the large PlacementUnits to have a relatively higher priority in packing since the high * displacement of these elements might lead to bad routing.
* */ int totalCellNum = 0; /** * @brief the term of timing (paths) in the packing score * * We will accumulate the max length of paths for each element in the cluster * */ int totalLen = 0; /** * @brief the configurable weight for the wirelength in the cluster score * */ float HPWLWeight = 0.01; // int muxF7Limit = 2; // int muxF8Limit = 1; // std::map<PlacementInfo::PlacementNet *, float> net2ConnectivityScore; // std::set<PlacementInfo::PlacementNet *> nets; /** * @brief the control set information for this cluster * * please note that some of these LUTs/FFs belong to a CARRY chain, which is not shown in PUs * */ std::vector<PackedControlSet> FFControlSets; /** * @brief the set of LUTs that have not been paired with other LUTs in the cluster * */ std::set<DesignInfo::DesignCell *> singleLUTs; /** * @brief the paired LUTs in the cluster * */ std::set<std::pair<DesignInfo::DesignCell *, DesignInfo::DesignCell *>> pairedLUTs; }; /** * @brief check whether all the PlacementUnit in the top cluster in the priority queue have been assigned to * this CLB site * * @return true if all the PlacementUnits in the top cluster in the priority queue have been assigned to * this CLB site * @return false if some of the PlacementUnits in the top cluster in the priority queue have NOT been assigned * to this CLB site yet (maybe unassigned yet or maybe assigned to some other CLB sites) */ inline bool isPQTopCompletelyAccptedByCells() { auto qTop = priorityQueue[0]; for (auto tmpPU : qTop->getPUs()) { if (PUId2PackingCLBSite[tmpPU->getId()] != this) { return false; } } return true; } /** * @brief find neighbor PlacementUnit around targetX/Y from the bin grid * * @param curCellType the given cell types * @param targetX target location X * @param targetY target location Y * @param displacementLowerbound the lower bound threshold of neighbors' displacement from the center (the * neighbors with low displacement might have been tried by a previous procedure) * @param displacementUpperbound the upper bound threshold of neighbors' displacement from the center * @param PUNumThreshold The threshold number of PlacementUnits found to stop the search procedure * @param PUId2PackingCLBSite The reference mapping indicating mapped PlacementUnits * @param y2xRatio a factor to tune the weights of the net spanning in Y-coordinate relative to the net spanning * in X-coordinate * @param res an input set to store resultant PUs (for incremental search if it is not nullptr) * @return std::set<PlacementInfo::PlacementUnit *, Packing_PUcompare>* */ std::set<PlacementInfo::PlacementUnit *, Packing_PUcompare> * findNeiborPUsFromBinGrid(DesignInfo::DesignCellType curCellType, float targetX, float targetY, float displacementLowerbound, float displacementUpperbound, int PUNumThreshold, const std::vector<PackingCLBSite *> &PUId2PackingCLBSite, float y2xRatio, std::set<PlacementInfo::PlacementUnit *, Packing_PUcompare> *res = nullptr); inline std::set<PlacementInfo::PlacementUnit *, Packing_PUcompare> &getNeighborPUs() { return neighborPUs; } /** * @brief sort the elements in the priority queue * */ void refreshPrioryQueue(); /** * @brief remove invalid clusters from the priority queue * * since some of the PlacementUnits have been determined to be mapped to some other sites, they should be * removed from the PQ * */ void removeInvalidClustersFromPQ(); /** * @brief remove invalid clusters from neighbor PlacementUnits * * since some of the PlacementUnits have been determined to be mapped to some other sites, they should be * removed from the neighbor PU
set. Moreover, some of the candidate PUs are not compatible with the determined * set of PUs, so we need to remove them too. */ void removeInvalidPUsFromNeighborPUs(); /** * @brief remove clusters incompatible with determined cluster from PQ * * some of the clusters in the PQ are not compatible with the determined set of PUs, so we need to remove them * too. */ void removeClustersIncompatibleWithDetClusterFromPQ(); /** * @brief extend the clusters in the priority queue with the neighbor PlacementUnits * */ void findNewClustersWithNeighborPUs(); /** * @brief an iteration to pack PlacementUnits into a CLB site * * @param initial indicate whether it is the first packing iteration * @param debug dump debug information or not */ void updateStep(bool initial, bool debug = false); /** * @brief update the information of consistent PUs at the top of priority queue * * the PQ top might keep being updated, but some of its PUs might stay consistent, so we can mark them as * determined PUs. */ void updateConsistentPUsInTop(); inline bool hasValidPQTop() { return priorityQueue.size(); } const PackingCLBCluster *getPriorityQueueTop() { assert(priorityQueue.size()); return priorityQueue[0]; } inline DeviceInfo::DeviceSite *getCLBSite() { return CLBSite; } inline float getY2xRatio() const { return y2xRatio; } inline float getDetScore() { return detScore; } inline PackingCLBCluster *getDeterminedClusterInSite() { return determinedClusterInSite; } inline void setDeterminedClusterInSite(PackingCLBCluster *tmpCluster) { determinedClusterInSite = tmpCluster; } inline float getHPWLChangeForPU(PlacementInfo::PlacementUnit *tmpPU) { float changeHPWL = 0; for (auto tmpNet : *tmpPU->getNetsSetPtr()) { if (tmpNet->getUnits().size() > 64) // ignore large net continue; float newHPWL = tmpNet->getNewHPWLByTrying(tmpPU, CLBSite->X(), CLBSite->Y(), y2xRatio); float oriHPWL = tmpNet->getHPWL(y2xRatio); if (std::isnan(newHPWL) || std::isnan(oriHPWL)) { #pragma omp critical { // TODO: investigate why NaN HPWL values can occur here
int o = 0; std::cout << "curPU === \n" << tmpPU << "\n"; std::cout << "CLBSite->X()=" << CLBSite->X() << " CLBSite->Y()=" << CLBSite->Y() << " y2xRatio=" << y2xRatio << "\n"; for (auto tmpPU0 : tmpNet->getUnits()) { auto tmpPinOffset = tmpNet->getPinOffsetsInUnit()[o]; std::cout << "PUXoffset=" << tmpPinOffset.x << " PUYoffset=" << tmpPinOffset.y << "\n"; std::cout << "PU=" << tmpPU0 << "\n"; o++; } float newHPWL1 = tmpNet->getNewHPWLByTrying(tmpPU, CLBSite->X(), CLBSite->Y(), y2xRatio); float oriHPWL1 = tmpNet->getHPWL(y2xRatio); std::cout << "newHPWL1=" << newHPWL1 << " oriHPWL1=" << oriHPWL1 << "\n"; assert(false); } } changeHPWL += newHPWL - oriHPWL; } return changeHPWL; } inline void setDebug() { debug = true; } inline PlacementInfo *getPlacementInfo() const { return placementInfo; } /** * @brief add CARRY-related PlacementUnit into this CLB site * */ inline void addCarry() { assert(determinedClusterInSite == nullptr); assert(CARRYChain); determinedClusterInSite = new PackingCLBCluster(this); for (auto prefixedSingleLUT : conflictLUTs) { assert(fixedLUTsInPairs.find(prefixedSingleLUT) == fixedLUTsInPairs.end()); // these are fixed LUT Pairs, should not be added assert(determinedClusterInSite->addLUT(prefixedSingleLUT)); } for (int i = 0; i < 2; i++) { for (int j = 0; j < 2; j++) { for (int k = 0; k < 4; k++) { if (slotMapping.FFs[i][j][k]) { // if (!slotMapping.FFs[i][j][k]->isVirtualCell()) bool succ = determinedClusterInSite->addFF(slotMapping.FFs[i][j][k], i * 2 + j); if (!succ) { determinedClusterInSite->addFF(slotMapping.FFs[i][j][k], i * 2 + j); std::cout << CARRYChain << " \n =====================================\n targetFF:" << slotMapping.FFs[i][j][k] << "\n"; std::cout << "slotMapping.FFs[i][j][k].CS=\n"; if (!slotMapping.FFs[i][j][k]->isVirtualCell()) { slotMapping.FFs[i][j][k]->getControlSetInfo()->display(); } std::cout << "offset: " << CARRYChainSiteOffset << "\n"; std::cout << "currentCluster:\n"; determinedClusterInSite->printMyself(); std::cout << "FF-FFSet:\n"; std::cout << DesignInfo::FFSRCompatible( slotMapping.FFs[i][j][k]->getOriCellType(), determinedClusterInSite->getFFControlSets()[0].getFFType()); std::cout << "FF-FF:\n"; std::cout << DesignInfo::FFSRCompatible( slotMapping.FFs[i][j][k]->getOriCellType(), determinedClusterInSite->getFFControlSets()[0].getFFs()[0]->getCellType()); std::cout << "i,j,k: " << i << ", " << j << ", " << k << "\n"; std::cout.flush(); } assert(succ); } } } } determinedClusterInSite->clusterHash(); determinedClusterInSite->updateScoreInSite(); } /** * @brief add LUTRAM-related PlacementUnit into this CLB site * */ inline void addLUTRAMMacro() { assert(determinedClusterInSite == nullptr); assert(LUTRAMMacro); determinedClusterInSite = new PackingCLBCluster(this); determinedClusterInSite->clusterHash(); determinedClusterInSite->updateScoreInSite(); } inline bool checkIsPrePackedSite() { return isCarrySite || isLUTRAMSite; } inline bool checkIsCarrySite() { return isCarrySite; } inline bool checkIsMuxSite() { if (determinedClusterInSite) { return determinedClusterInSite->getNumMuxes() > 0; } return false; } inline bool checkIsLUTRAMSite() { return isLUTRAMSite; } inline PlacementInfo::PlacementMacro *getCarryMacro() { return CARRYChain; } inline PlacementInfo::PlacementMacro *getLUTRAMMacro() { return LUTRAMMacro; } inline int getCarrySiteOffset() { return CARRYChainSiteOffset; } /** * @brief SiteBELMapping is a container recording the mapping between cells and BELs. * * We hold the cell information in arrays of slots.
* */ class SiteBELMapping { public: SiteBELMapping() { Carry = nullptr; for (int i = 0; i < 2; i++) { MuxF8[i] = nullptr; for (int j = 0; j < 2; j++) { MuxF7[i][j] = nullptr; for (int k = 0; k < 4; k++) { LUTs[i][j][k] = nullptr; FFs[i][j][k] = nullptr; } } } } ~SiteBELMapping() { } DesignInfo::DesignCell *LUTs[2][2][4]; // [bottom_Or_Top][6 or 5][which Slot] DesignInfo::DesignCell *FFs[2][2][4]; // [bottom_Or_Top][FF or FF2][which Slot] DesignInfo::DesignCell *MuxF7[2][2]; // [bottom_Or_Top][which Slot] DesignInfo::DesignCell *MuxF8[2]; DesignInfo::DesignCell *Carry; const std::string MuxF8SlotNames[2] = {"F8MUX_BOT", "F8MUX_TOP"}; const std::string MuxF7SlotNames[2][2] = {{"F7MUX_AB", "F7MUX_CD"}, {"F7MUX_EF", "F7MUX_GH"}}; }; void mapCarryRelatedCellsToSlots(PlacementInfo::PlacementMacro *_CARRYChain, float siteOffset); void mapLUTRAMRelatedCellsToSlots(PlacementInfo::PlacementMacro *_LUTRAMMacro); void finalMapToSlotsForCarrySite(); /** * @brief map cells in MUXF8 macro to CLB slot * * @param muxF8Offset the offset of the MUXF8 in the CLB site. There are only two slots for MUXF8 * @param MUXF8Macro the pointer of the MUXF8 needed to be mapped into the current site */ void mapMuxF8Macro(int muxF8Offset, PlacementInfo::PlacementMacro *MUXF8Macro); /** * @brief map cells in MUXF7 macro to CLB slot * * @param halfCLBOffset the offset of the MUXF7 in the CLB site. There are only two slots for MUXF7 * @param MUXF7Macro the pointer of the MUXF7 needed to be mapped into the current site */ void mapMuxF7Macro(int halfCLBOffset, PlacementInfo::PlacementMacro *MUXF7Macro); /** * @brief find the corresponding FF control set id for a given Mux macro (this mux macro should have been mapped * to a control set in this site) * * @param MUXF8Macro a given Mux macro * @return int */ int findMuxFromHalfCLB(PlacementInfo::PlacementMacro *MUXF8Macro); /** * @brief find the slots in the site for Muxes * */ void greedyMapMuxForCommonLUTFFInSite(); /** * @brief greedily find the exact slots for the LUTs/FFs in the site * */ void greedyMapForCommonLUTFFInSite(); /** * @brief finally map LUTs/FFs to the exact slots in the sites * */ void finalMapToSlotsForCommonLUTFFInSite(); /** * @brief finally map the elements (CARRY/MUX/LUT/FF) packed in this site into the slots in the site * */ void finalMapToSlots() { if (determinedClusterInSite) { if (checkIsCarrySite()) { finalMapToSlotsForCarrySite(); } else if (checkIsLUTRAMSite()) { } else if (checkIsMuxSite()) { greedyMapMuxForCommonLUTFFInSite(); } else if (!checkIsPrePackedSite() && !checkIsMuxSite()) { // LUTS-FFs Packing finalMapToSlotsForCommonLUTFFInSite(); } else { // assert(false && "undefined packing situation"); } } } /** * @brief Get the fixed pairs of LUTs which should NOT be broken * * @return std::set<std::pair<DesignInfo::DesignCell *, DesignInfo::DesignCell *>>& */ std::set<std::pair<DesignInfo::DesignCell *, DesignInfo::DesignCell *>> &getFixedPairedLUTs() { return fixedPairedLUTs; } /** * @brief Get the LUTs which CANNOT be paired * * @return std::set<DesignInfo::DesignCell *>& */ std::set<DesignInfo::DesignCell *> &getConflictLUTs() { return conflictLUTs; } /** * @brief check whether a given cell is unpackable * * some driver LUTs of CARRY/MUX cannot be paired * * @param tmpCell the given cell * @return true if the cell CANNOT be paired with other LUTs * @return false if the cell CAN be paired with other LUTs */ bool conflictLUTsContain(DesignInfo::DesignCell *tmpCell) { return conflictLUTs.find(tmpCell) != conflictLUTs.end(); } /** * @brief Get the slot(BEL)
mapping of the cells * * @return const SiteBELMapping& */ const SiteBELMapping &getSlotMapping() const { return slotMapping; } /** * @brief check whether the given FF and LUT are directly connected * * @param FF2LUT * @param tmpLUT * @param tmpFF * @return int */ inline int checkDirectLUTFFConnect(std::map<DesignInfo::DesignCell *, DesignInfo::DesignCell *> &FF2LUT, DesignInfo::DesignCell *tmpLUT, DesignInfo::DesignCell *tmpFF) { if (!tmpFF || !tmpLUT) return 0; if (FF2LUT.find(tmpFF) == FF2LUT.end()) return 0; return FF2LUT[tmpFF] == tmpLUT; } private: PlacementInfo *placementInfo; DeviceInfo::DeviceSite *CLBSite; /** * @brief specify how many iterations a PlacementUnit should stay at the top priority of a * site before we finally map it to the site * */ int unchangedIterationThr = 3; /** * @brief the threshold number of cells for site * */ unsigned int numNeighbor = 10; /** * @brief the increase step of the neighbor search diameter * */ float deltaD = 1.0; /** * @brief current neighbor search diameter * */ float curD = 0; /** * @brief the maximum constraint of the neighbor search diameter * */ float maxD = 10; /** * @brief the size of priority queue (the low-priority candidates will be removed) * */ unsigned int PQSize = 10; /** * @brief a factor to tune the weights of the net spanning in Y-coordinate relative to the net spanning * in X-coordinate * */ float y2xRatio = 1.0; /** * @brief the factor of HPWL overhead in packing evaluation for a cell * */ float HPWLWeight = 0.01; int unchangeIterationCnt = 0; std::set<PlacementInfo::PlacementUnit *, Packing_PUcompare> neighborPUs; // std::map<PlacementInfo::PlacementUnit *, float> PU2HPWLChange; std::vector<PackingCLBCluster *> seedClusters; std::vector<PackingCLBCluster *> priorityQueue; std::map<PlacementInfo::PlacementUnit *, int> PU2TopCnt; const std::vector<PackingCLBSite *> &PUId2PackingCLBSite; PackingCLBCluster *determinedClusterInSite = nullptr; float detScore = 0; bool isCarrySite = false; bool isLUTRAMSite = false; PlacementInfo::PlacementMacro *CARRYChain = nullptr; PlacementInfo::PlacementMacro *LUTRAMMacro = nullptr; int CARRYChainSiteOffset = -1; bool debug = false; std::set<DesignInfo::DesignCell *> mappedCells; std::set<DesignInfo::DesignCell *> mappedLUTs; std::set<DesignInfo::DesignCell *> mappedFFs; std::set<std::pair<DesignInfo::DesignCell *, DesignInfo::DesignCell *>> fixedPairedLUTs; std::set<DesignInfo::DesignCell *> fixedLUTsInPairs; std::set<DesignInfo::DesignCell *> conflictLUTs; SiteBELMapping slotMapping; }; /** * @brief helper struct for candidate site sorting * */ typedef struct _siteWithScore { PackingCLBSite *site; float score; _siteWithScore(PackingCLBSite *site, float score) : site(site), score(score) { } } siteWithScore; /** * @brief PULocation is a helper class to find the neighbor PlacementUnits with KD-Tree * */ class PULocation : public std::array<float, 2> { public: // dimension of the Point static const int DIM = 2; PULocation() { assert(false); } PULocation(PlacementInfo::PlacementUnit *tmpPU) : PU(tmpPU) { (*this)[0] = tmpPU->X(); (*this)[1] = tmpPU->Y(); } PULocation &operator=(const PULocation &anotherPULocation) { (*this)[0] = anotherPULocation[0]; (*this)[1] = anotherPULocation[1]; PU = anotherPULocation.getPU(); return (*this); } inline PlacementInfo::PlacementUnit *getPU() const { return PU; } private: PlacementInfo::PlacementUnit *PU; }; /** * @brief Load the information of some packed macros, like LUTRAM/Crossing-Clock-Domain FFs/Carry Chains, that have been * legalized.
* * @param tmpMacro */ void prePackLegalizedMacros(PlacementInfo::PlacementMacro *tmpMacro); /** * @brief update the packing cluster candidates for each CLB site and determine some mapping from elements to sites * according to the "confidence". * * @param initial indicate whether it is the first round of the packing iteration * @param debug whether to print out debugging information */ void packCLBsIteration(bool initial, bool debug = false); /** * @brief pack the PlacementUnits (which are compatible with CLB sites) into CLB sites * * @param packIterNum the number of packing iterations * @param doExceptionHandling whether to conduct exception handling if some PlacementUnits fail to be legalized during the * parallel procedure * @param debug whether to print out debugging information */ void packCLBs(int packIterNum, bool doExceptionHandling, bool debug = false); /** * @brief handle the PlacementUnits that cannot be packed during the parallel procedure * * @param verbose whether to dump information for debugging */ void exceptionHandling(bool verbose = false); /** * @brief find the neighbors of specific cell type with given coordinate center * * @param curCellType the given cell type * @param targetX center X * @param targetY center Y * @param displacementLowerbound the lower bound threshold of neighbors' displacement from the center (the neighbors * with low displacement might have been tried by a previous procedure) * @param displacementUpperbound the upper bound threshold of neighbors' displacement from the center * @param y2xRatio a factor to tune the weights of the net spanning in Y-coordinate relative to the net spanning * in X-coordinate * @return std::vector<DeviceInfo::DeviceSite *>* */ std::vector<DeviceInfo::DeviceSite *> *findNeiborSitesFromBinGrid(DesignInfo::DesignCellType curCellType, float targetX, float targetY, float displacementLowerbound, float displacementUpperbound, float y2xRatio); /** * @brief try to find a legal location for the given PlacementUnit when most of the PlacementUnits have been packed into CLB * sites * * @param curPU a PlacementUnit which has NOT been legalized/packed * @param displacementThreshold the displacement threshold to find the neighbor site candidate * @param verbose whether to print out debugging information * @return true if the PlacementUnit is legalized/packed successfully * @return false if the PlacementUnit CANNOT be legalized/packed successfully in this iteration */ bool exceptionPULegalize(PlacementInfo::PlacementUnit *curPU, float displacementThreshold, bool verbose); /** * @brief try to rip up the packing for a given CLB site and pack the given PlacementUnit in the site.
The evicted * PlacementUnits which were originally packed in this site and cannot be packed now will try to find other CLB sites * to be packed in * * @param curTargetPackingSite a given CLB site * @param curPU a given PlacementUnit * @param displacementThreshold the displacement threshold for the evicted PlacementUnits to find the neighbor site * candidates * @param packingSite2DeterminedCluster the mapping between PlacementUnit and CLB sites * @param verbose whether to print out debugging information * @return true if such re-packing is successful for the involved CLB sites and PlacementUnits * @return false if such re-packing FAILS for the involved CLB sites and PlacementUnits */ bool ripUpAndLegalizae( PackingCLBSite *curTargetPackingSite, PlacementInfo::PlacementUnit *curPU, float displacementThreshold, std::map<PackingCLBSite *, PackingCLBSite::PackingCLBCluster *> &packingSite2DeterminedCluster, bool verbose); /** * @brief check the packing status for all the PlacementUnits * */ void checkPackedPUsAndUnpackedPUs(); /** * @brief update the location of PlacementUnits according to the packing result * */ void setPULocationToPackedSite(); /** * @brief Update the macros in PlacementInfo by regarding those elements in one CLB site as a macro * * @param setPUPseudoNetToCLBSite whether to set the legalization pseudo nets for those packed PlacementUnits after * updating * @param setCLBFixed whether to fix the locations of the packed PlacementUnits after updating */ void updatePackedMacro(bool setPUPseudoNetToCLBSite = false, bool setCLBFixed = false); /** * @brief set the packed attribute for the packed PlacementUnits * */ void setPUsToBePacked(); void dumpFinalPacking(); void dumpDSPBRAMPlacementTcl(std::ofstream &outfileTcl); void dumpCLBPlacementTcl(std::ofstream &outfileTcl, bool packingRelatedToLUT6_2); void dumpPlacementTcl(std::string dumpTclFile); private: DesignInfo *designInfo; DeviceInfo *deviceInfo; PlacementInfo *placementInfo; std::map<std::string, std::string> &JSONCfg; /** * @brief specify how many iterations a PlacementUnit should stay at the top priority of a * site before we finally map it to the site * */ int unchangedIterationThr; /** * @brief the threshold number of cells for site * */ int numNeighbor; /** * @brief the increase step of the neighbor search diameter * */ float deltaD; /** * @brief current neighbor search diameter * */ float curD; /** * @brief the maximum constraint of the neighbor search diameter * */ float maxD; /** * @brief the size of priority queue (the low-priority candidates will be removed) * */ int PQSize; /** * @brief the factor of HPWL overhead in packing evaluation for a cell * */ float HPWLWeight; std::string packerName; int DumpCLBPackingCnt = 0; std::vector<PackingCLBSite *> PUId2PackingCLBSite; std::vector<PackingCLBSite *> packingSites; std::vector<std::vector<PackingCLBSite *>> clockColumns2PackingSites; std::vector<PackingCLBSite *> PUId2PackingCLBSiteCandidate; std::vector<PlacementInfo::PlacementUnit *> &placementUnits; std::vector<PlacementInfo::PlacementUnpackedCell *> &placementUnpackedCells; std::vector<PlacementInfo::PlacementMacro *> &placementMacros; std::set<DesignInfo::DesignCell *> &cellInMacros; std::map<int, PlacementInfo::PlacementUnit *> &cellId2PlacementUnit; std::map<DeviceInfo::DeviceSite *, PackingCLBSite *> deviceSite2PackingSite; std::set<PlacementInfo::PlacementUnit *, Packing_PUcompare> packedPUs; std::set<PlacementInfo::PlacementUnit *, Packing_PUcompare> unpackedPUs; std::vector<PlacementInfo::PlacementUnit *> unpackedPUsVec;
std::map<PackingCLBSite *, PlacementInfo::PlacementUnit *> involvedPackingSite2PU; std::vector<PULocation> PUPoints; float y2xRatio = 1.0; }; std::ostream &operator<<(std::ostream &os, const ParallelCLBPacker::PackingCLBSite::PackingCLBCluster *tmpCluster); // inline bool operator<(const ParallelCLBPacker::PackingCLBSite::PackingCLBCluster &A, // const ParallelCLBPacker::PackingCLBSite::PackingCLBCluster &B) // { // return A.getScoreInSite() < B.getScoreInSite(); // } #endif
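/* A minimal, self-contained C sketch of the expanding ring search implied by
 * findNeiborSitesFromBinGrid and the deltaD/curD/maxD members above: candidate
 * sites are collected from a displacement annulus [lower, upper) around the
 * target location, and the annulus is widened by deltaD until enough
 * candidates are found or maxD is reached.  All names here (SitePos,
 * collect_neighbors) are hypothetical illustrations, not part of the packer's
 * API, and plain Manhattan displacement is used (the y2xRatio weighting is
 * omitted for brevity). */
#include <math.h>
#include <stddef.h>

typedef struct { float x, y; } SitePos;

/* Returns the number of site indices written into out (capacity outCap). */
static size_t collect_neighbors(const SitePos *sites, size_t nSites,
                                float targetX, float targetY,
                                float lower, float deltaD, float maxD,
                                size_t wanted, size_t *out, size_t outCap)
{
    size_t n = 0;
    float upper = lower + deltaD;
    while (n < wanted && upper <= maxD) {
        n = 0;
        for (size_t i = 0; i < nSites && n < outCap; ++i) {
            float d = fabsf(sites[i].x - targetX) + fabsf(sites[i].y - targetY);
            /* the lower bound skips sites already tried with smaller rings */
            if (d >= lower && d < upper)
                out[n++] = i;
        }
        upper += deltaD;  /* widen the ring, mirroring curD += deltaD */
    }
    return n;
}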
vacc.c
/** * @file vacc.c * @ingroup Vacc * @author Nathan Baker * @brief Class Vacc methods * @version $Id$ * @attention * @verbatim * * APBS -- Adaptive Poisson-Boltzmann Solver * * Nathan A. Baker (nathan.baker@pnnl.gov) * Pacific Northwest National Laboratory * * Additional contributing authors listed in the code documentation. * * Copyright (c) 2010-2020 Battelle Memorial Institute. Developed at the * Pacific Northwest National Laboratory, operated by Battelle Memorial * Institute, Pacific Northwest Division for the U.S. Department of Energy. * * Portions Copyright (c) 2002-2010, Washington University in St. Louis. * Portions Copyright (c) 2002-2020, Nathan A. Baker. * Portions Copyright (c) 1999-2002, The Regents of the University of * California. * Portions Copyright (c) 1995, Michael Holst. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * Neither the name of the developer nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * @endverbatim */ #include "vacc.h" VEMBED(rcsid="$Id$") #if !defined(VINLINE_VACC) VPUBLIC unsigned long int Vacc_memChk(Vacc *thee) { if (thee == VNULL) return 0; return Vmem_bytes(thee->mem); } #endif /* if !defined(VINLINE_VACC) */ /** * @brief Determines if a point is within the union of the spheres centered * at the atomic centers with radii equal to the sum of their van der * Waals radii and the probe radius. Does not include contributions * from the specified atom. 
* @returns 1 if accessible (outside the inflated van der Waals radius), 0 * otherwise * @author Nathan Baker */ VPRIVATE int ivdwAccExclus( Vacc *thee, /** Accessibility object */ double center[3], /** Position to test */ double radius, /** Radius of probe */ int atomID /** ID of atom to ignore */ ) { int iatom; double dist2, *apos; Vatom *atom; VclistCell *cell; VASSERT(thee != VNULL); /* We can only test probes with radii less than the max specified */ if (radius > Vclist_maxRadius(thee->clist)) { Vnm_print(2, "Vacc_ivdwAcc: got radius (%g) bigger than max radius (%g)\n", radius, Vclist_maxRadius(thee->clist)); VASSERT(0); } /* Get the relevant cell from the cell list */ cell = Vclist_getCell(thee->clist, center); /* If we have no cell, then no atoms are nearby and we're definitely * accessible */ if (cell == VNULL) { return 1; } /* Otherwise, check for overlap with the atoms in the cell */ for (iatom=0; iatom<cell->natoms; iatom++) { atom = cell->atoms[iatom]; // We don't actually need to test this if the atom IDs do match; don't compute this if we're comparing atom against itself. if (atom->id == atomID) continue; apos = atom->position; dist2 = VSQR(center[0]-apos[0]) + VSQR(center[1]-apos[1]) + VSQR(center[2]-apos[2]); if (dist2 < VSQR(atom->radius+radius)){ return 0; } } /* If we're still here, then the point is accessible */ return 1; } VPUBLIC Vacc* Vacc_ctor(Valist *alist, Vclist *clist, double surf_density /* Surface density */ ) { Vacc *thee = VNULL; /* Set up the structure */ thee = (Vacc*)Vmem_malloc(VNULL, 1, sizeof(Vacc) ); VASSERT( thee != VNULL); VASSERT( Vacc_ctor2(thee, alist, clist, surf_density)); return thee; } /** Check and store parameters passed to constructor */ VPRIVATE int Vacc_storeParms(Vacc *thee, Valist *alist, Vclist *clist, double surf_density /* Surface density */ ) { int nsphere, iatom; double maxrad = 0.0, maxarea, rad; Vatom *atom; if (alist == VNULL) { Vnm_print(2, "Vacc_storeParms: Got NULL Valist!\n"); return 0; } else thee->alist = alist; if (clist == VNULL) { Vnm_print(2, "Vacc_storeParms: Got NULL Vclist!\n"); return 0; } else thee->clist = clist; thee->surf_density = surf_density; /* Loop through the atoms to determine the maximum radius */ maxrad = 0.0; for (iatom=0; iatom<Valist_getNumberAtoms(alist); iatom++) { atom = Valist_getAtom(alist, iatom); rad = Vatom_getRadius(atom); if (rad > maxrad) maxrad = rad; } maxrad = maxrad + Vclist_maxRadius(thee->clist); maxarea = 4.0*VPI*maxrad*maxrad; nsphere = (int)ceil(maxarea*surf_density); Vnm_print(0, "Vacc_storeParms: Surf. 
density = %g\n", surf_density); Vnm_print(0, "Vacc_storeParms: Max area = %g\n", maxarea); thee->refSphere = VaccSurf_refSphere(thee->mem, nsphere); Vnm_print(0, "Vacc_storeParms: Using %d-point reference sphere\n", thee->refSphere->npts); return 1; } /** Allocate (and clear) space for storage */ VPRIVATE int Vacc_allocate(Vacc *thee) { int i, natoms; natoms = Valist_getNumberAtoms(thee->alist); thee->atomFlags = (int*)Vmem_malloc(thee->mem, natoms, sizeof(int)); if (thee->atomFlags == VNULL) { Vnm_print(2, "Vacc_allocate: Failed to allocate %d (int)s for atomFlags!\n", natoms); return 0; } for (i=0; i<natoms; i++) (thee->atomFlags)[i] = 0; return 1; } VPUBLIC int Vacc_ctor2(Vacc *thee, Valist *alist, Vclist *clist, double surf_density ) { /* Check and store parameters */ if (!Vacc_storeParms(thee, alist, clist, surf_density)) { Vnm_print(2, "Vacc_ctor2: parameter check failed!\n"); return 0; } /* Set up memory management object */ thee->mem = Vmem_ctor("APBS::VACC"); if (thee->mem == VNULL) { Vnm_print(2, "Vacc_ctor2: memory object setup failed!\n"); return 0; } /* Setup and check probe */ thee->surf = VNULL; /* Allocate space */ if (!Vacc_allocate(thee)) { Vnm_print(2, "Vacc_ctor2: memory allocation failed!\n"); return 0; } return 1; } VPUBLIC void Vacc_dtor(Vacc **thee) { if ((*thee) != VNULL) { Vacc_dtor2(*thee); Vmem_free(VNULL, 1, sizeof(Vacc), (void **)thee); (*thee) = VNULL; } } VPUBLIC void Vacc_dtor2(Vacc *thee) { int i, natoms; natoms = Valist_getNumberAtoms(thee->alist); Vmem_free(thee->mem, natoms, sizeof(int), (void **)&(thee->atomFlags)); if (thee->refSphere != VNULL) { VaccSurf_dtor(&(thee->refSphere)); thee->refSphere = VNULL; } if (thee->surf != VNULL) { for (i=0; i<natoms; i++) VaccSurf_dtor(&(thee->surf[i])); Vmem_free(thee->mem, natoms, sizeof(VaccSurf *), (void **)&(thee->surf)); thee->surf = VNULL; } Vmem_dtor(&(thee->mem)); } VPUBLIC double Vacc_vdwAcc(Vacc *thee, double center[3] ) { VclistCell *cell; Vatom *atom; int iatom; double *apos, dist2; /* Get the relevant cell from the cell list */ cell = Vclist_getCell(thee->clist, center); /* If we have no cell, then no atoms are nearby and we're definitely * accessible */ if (cell == VNULL) return 1.0; /* Otherwise, check for overlap with the atoms in the cell */ for (iatom=0; iatom<cell->natoms; iatom++) { atom = cell->atoms[iatom]; apos = Vatom_getPosition(atom); dist2 = VSQR(center[0]-apos[0]) + VSQR(center[1]-apos[1]) + VSQR(center[2]-apos[2]); if (dist2 < VSQR(Vatom_getRadius(atom))) return 0.0; } /* If we're still here, then the point is accessible */ return 1.0; } VPUBLIC double Vacc_ivdwAcc(Vacc *thee, double center[3], double radius ) { return (double)ivdwAccExclus(thee, center, radius, -1); } VPUBLIC void Vacc_splineAccGradAtomNorm(Vacc *thee, double center[VAPBS_DIM], double win, double infrad, Vatom *atom, double *grad ) { int i; double dist, *apos, arad, sm, sm2, w2i, /* inverse of win squared */ w3i, /* inverse of win cubed */ mygrad, mychi = 1.0; /* Char. func. 
value for given atom */ VASSERT(thee != NULL); /* Inverse squared window parameter */ w2i = 1.0/(win*win); w3i = 1.0/(win*win*win); /* The grad is zero by default */ for (i=0; i<VAPBS_DIM; i++) grad[i] = 0.0; /* *** CALCULATE THE CHARACTERISTIC FUNCTION VALUE FOR THIS ATOM AND THE * *** MAGNITUDE OF THE FORCE *** */ apos = Vatom_getPosition(atom); /* Zero-radius atoms don't contribute */ if (Vatom_getRadius(atom) > 0.0) { arad = Vatom_getRadius(atom) + infrad; dist = VSQRT(VSQR(apos[0]-center[0]) + VSQR(apos[1]-center[1]) + VSQR(apos[2]-center[2])); /* If we're inside an atom, the entire characteristic function * will be zero and the grad will be zero, so we can stop */ if (dist < (arad - win)) return; /* Likewise, if we're outside the smoothing window, the characteristic * function is unity and the grad will be zero, so we can stop */ else if (dist > (arad + win)) return; /* Account for floating point error at the border * NAB: COULDN'T THESE TESTS BE COMBINED AS BELOW * (Vacc_splineAccAtom)? */ else if ((VABS(dist - (arad - win)) < VSMALL) || (VABS(dist - (arad + win)) < VSMALL)) return; /* If we're inside the smoothing window */ else { sm = dist - arad + win; sm2 = VSQR(sm); mychi = 0.75*sm2*w2i -0.25*sm*sm2*w3i; mygrad = 1.5*sm*w2i - 0.75*sm2*w3i; } /* Now assemble the grad vector */ VASSERT(mychi > 0.0); for (i=0; i<VAPBS_DIM; i++) grad[i] = -(mygrad/mychi)*((center[i] - apos[i])/dist); } } VPUBLIC void Vacc_splineAccGradAtomUnnorm(Vacc *thee, double center[VAPBS_DIM], double win, double infrad, Vatom *atom, double *grad ) { int i; double dist, *apos, arad, sm, sm2, w2i, /* Inverse of win squared */ w3i, /* Inverse of win cubed */ mygrad, mychi = 1.0; /* Char. func. value for given atom */ VASSERT(thee != NULL); /* Inverse squared window parameter */ w2i = 1.0/(win*win); w3i = 1.0/(win*win*win); /* The grad is zero by default */ for (i=0; i<VAPBS_DIM; i++) grad[i] = 0.0; /* *** CALCULATE THE CHARACTERISTIC FUNCTION VALUE FOR THIS ATOM AND THE * *** MAGNITUDE OF THE FORCE *** */ apos = Vatom_getPosition(atom); /* Zero-radius atoms don't contribute */ if (Vatom_getRadius(atom) > 0.0) { arad = Vatom_getRadius(atom) + infrad; dist = VSQRT(VSQR(apos[0]-center[0]) + VSQR(apos[1]-center[1]) + VSQR(apos[2]-center[2])); /* If we're inside an atom, the entire characteristic function * will be zero and the grad will be zero, so we can stop */ if (dist < (arad - win)) return; /* Likewise, if we're outside the smoothing window, the characteristic * function is unity and the grad will be zero, so we can stop */ else if (dist > (arad + win)) return; /* Account for floating point error at the border * NAB: COULDN'T THESE TESTS BE COMBINED AS BELOW * (Vacc_splineAccAtom)? 
*/ else if ((VABS(dist - (arad - win)) < VSMALL) || (VABS(dist - (arad + win)) < VSMALL)) return; /* If we're inside the smoothing window */ else { sm = dist - arad + win; sm2 = VSQR(sm); mychi = 0.75*sm2*w2i -0.25*sm*sm2*w3i; mygrad = 1.5*sm*w2i - 0.75*sm2*w3i; } /* Now assemble the grad vector */ VASSERT(mychi > 0.0); for (i=0; i<VAPBS_DIM; i++) grad[i] = -(mygrad)*((center[i] - apos[i])/dist); } } VPUBLIC double Vacc_splineAccAtom(Vacc *thee, double center[VAPBS_DIM], double win, double infrad, Vatom *atom ) { double dist, *apos, arad, sm, sm2, w2i, /* Inverse of win squared */ w3i, /* Inverse of win cubed */ value, stot, sctot; VASSERT(thee != NULL); /* Inverse squared window parameter */ w2i = 1.0/(win*win); w3i = 1.0/(win*win*win); apos = Vatom_getPosition(atom); /* Zero-radius atoms don't contribute */ if (Vatom_getRadius(atom) > 0.0) { arad = Vatom_getRadius(atom) + infrad; stot = arad + win; sctot = VMAX2(0, (arad - win)); dist = VSQRT(VSQR(apos[0]-center[0]) + VSQR(apos[1]-center[1]) + VSQR(apos[2]-center[2])); /* If we're inside an atom, the entire characteristic function * will be zero */ if ((dist < sctot) || (VABS(dist - sctot) < VSMALL)){ value = 0.0; /* We're outside the smoothing window */ } else if ((dist > stot) || (VABS(dist - stot) < VSMALL)) { value = 1.0; /* We're inside the smoothing window */ } else { sm = dist - arad + win; sm2 = VSQR(sm); value = 0.75*sm2*w2i - 0.25*sm*sm2*w3i; } } else value = 1.0; return value; } /** * @brief Fast spline-based surface computation subroutine * @returns Spline value * @author Todd Dolinsky and Nathan Baker */ VPRIVATE double splineAcc( Vacc *thee, /** Accessibility object */ double center[VAPBS_DIM], /** Point at which the acc is to be * evaluated */ double win, /** Spline window */ double infrad, /** Radius to inflate atomic radius */ VclistCell *cell /** Cell of atom objects */ ) { int atomID, iatom; Vatom *atom; double value = 1.0; VASSERT(thee != NULL); /* Now loop through the atoms assembling the characteristic function */ for (iatom=0; iatom<cell->natoms; iatom++) { atom = cell->atoms[iatom]; atomID = atom->id; /* Check to see if we've counted this atom already */ if ( !(thee->atomFlags[atomID]) ) { thee->atomFlags[atomID] = 1; value *= Vacc_splineAccAtom(thee, center, win, infrad, atom); if (value < VSMALL) return value; } } return value; } VPUBLIC double Vacc_splineAcc(Vacc *thee, double center[VAPBS_DIM], double win, double infrad) { VclistCell *cell; Vatom *atom; int iatom, atomID; VASSERT(thee != NULL); if (Vclist_maxRadius(thee->clist) < (win + infrad)) { Vnm_print(2, "Vacc_splineAcc: Vclist has max_radius=%g;\n", Vclist_maxRadius(thee->clist)); Vnm_print(2, "Vacc_splineAcc: Insufficient for win=%g, infrad=%g\n", win, infrad); VASSERT(0); } /* Get a cell or VNULL; in the latter case return 1.0 */ cell = Vclist_getCell(thee->clist, center); if (cell == VNULL) return 1.0; /* First, reset the list of atom flags * NAB: THIS SEEMS VERY INEFFICIENT */ for (iatom=0; iatom<cell->natoms; iatom++) { atom = cell->atoms[iatom]; atomID = atom->id; thee->atomFlags[atomID] = 0; } return splineAcc(thee, center, win, infrad, cell); } VPUBLIC void Vacc_splineAccGrad(Vacc *thee, double center[VAPBS_DIM], double win, double infrad, double *grad) { int iatom, i, atomID; double acc = 1.0; double tgrad[VAPBS_DIM]; VclistCell *cell; Vatom *atom = VNULL; VASSERT(thee != NULL); if (Vclist_maxRadius(thee->clist) < (win + infrad)) { Vnm_print(2, "Vacc_splineAccGrad: Vclist max_radius=%g;\n", Vclist_maxRadius(thee->clist)); Vnm_print(2, 
"Vacc_splineAccGrad: Insufficient for win=%g, infrad=%g\n", win, infrad); VASSERT(0); } /* Reset the gradient */ for (i=0; i<VAPBS_DIM; i++) grad[i] = 0.0; /* Get the cell; check for nullity */ cell = Vclist_getCell(thee->clist, center); if (cell == VNULL) return; /* Reset the list of atom flags */ for (iatom=0; iatom<cell->natoms; iatom++) { atom = cell->atoms[iatom]; atomID = atom->id; thee->atomFlags[atomID] = 0; } /* Get the local accessibility */ acc = splineAcc(thee, center, win, infrad, cell); /* Accumulate the gradient of all local atoms */ if (acc > VSMALL) { for (iatom=0; iatom<cell->natoms; iatom++) { atom = cell->atoms[iatom]; Vacc_splineAccGradAtomNorm(thee, center, win, infrad, atom, tgrad); } for (i=0; i<VAPBS_DIM; i++) grad[i] += tgrad[i]; } for (i=0; i<VAPBS_DIM; i++) grad[i] *= -acc; } VPUBLIC double Vacc_molAcc(Vacc *thee, double center[VAPBS_DIM], double radius) { double rc; /* ******* CHECK IF OUTSIDE ATOM+PROBE RADIUS SURFACE ***** */ if (Vacc_ivdwAcc(thee, center, radius) == 1.0) { /* Vnm_print(2, "DEBUG: ivdwAcc = 1.0\n"); */ rc = 1.0; /* ******* CHECK IF INSIDE ATOM RADIUS SURFACE ***** */ } else if (Vacc_vdwAcc(thee, center) == 0.0) { /* Vnm_print(2, "DEBUG: vdwAcc = 0.0\n"); */ rc = 0.0; /* ******* CHECK IF OUTSIDE MOLECULAR SURFACE ***** */ } else { /* Vnm_print(2, "DEBUG: calling fastMolAcc...\n"); */ rc = Vacc_fastMolAcc(thee, center, radius); } return rc; } VPUBLIC double Vacc_fastMolAcc(Vacc *thee, double center[VAPBS_DIM], double radius) { Vatom *atom; VaccSurf *surf; VclistCell *cell; int ipt, iatom, atomID; double dist2, rad2; rad2 = radius*radius; /* Check to see if the SAS has been defined */ if (thee->surf == VNULL) Vacc_SASA(thee, radius); /* Get the cell associated with this point */ cell = Vclist_getCell(thee->clist, center); if (cell == VNULL) { Vnm_print(2, "Vacc_fastMolAcc: unexpected VNULL VclistCell!\n"); return 1.0; } /* Loop through all the atoms in the cell */ for (iatom=0; iatom<cell->natoms; iatom++) { atom = cell->atoms[iatom]; atomID = Vatom_getAtomID(atom); surf = thee->surf[atomID]; /* Loop through all SAS points associated with this atom */ for (ipt=0; ipt<surf->npts; ipt++) { /* See if we're within a probe radius of the point */ dist2 = VSQR(center[0]-(surf->xpts[ipt])) + VSQR(center[1]-(surf->ypts[ipt])) + VSQR(center[2]-(surf->zpts[ipt])); if (dist2 < rad2) return 1.0; } } /* If all else failed, we are not inside the molecular surface */ return 0.0; } #if defined(HAVE_MC_H) VPUBLIC void Vacc_writeGMV(Vacc *thee, double radius, int meth, Gem *gm, char *iodev, char *iofmt, char *iohost, char *iofile) { double *accVals[MAXV], coord[3]; Vio *sock; int ivert, icoord; for (ivert=0; ivert<MAXV; ivert++) accVals[ivert] = VNULL; accVals[0] = (void *)Vmem_malloc(thee->mem, Gem_numVV(gm), sizeof(double)); accVals[1] = (void *)Vmem_malloc(thee->mem, Gem_numVV(gm), sizeof(double)); for (ivert=0; ivert<Gem_numVV(gm); ivert++) { for (icoord=0;icoord<3;icoord++) coord[icoord] = VV_coord(Gem_VV(gm, ivert), icoord); if (meth == 0) { accVals[0][ivert] = Vacc_molAcc(thee, coord, radius); accVals[1][ivert] = Vacc_molAcc(thee, coord, radius); } else if (meth == 1) { accVals[0][ivert] = Vacc_ivdwAcc(thee, coord, radius); accVals[1][ivert] = Vacc_ivdwAcc(thee, coord, radius); } else if (meth == 2) { accVals[0][ivert] = Vacc_vdwAcc(thee, coord); accVals[1][ivert] = Vacc_vdwAcc(thee, coord); } else VASSERT(0); } sock = Vio_ctor(iodev, iofmt, iohost, iofile, "w"); Gem_writeGMV(gm, sock, 1, accVals); Vio_dtor(&sock); Vmem_free(thee->mem, Gem_numVV(gm), 
sizeof(double), (void **)&(accVals[0])); Vmem_free(thee->mem, Gem_numVV(gm), sizeof(double), (void **)&(accVals[1])); } #endif /* defined(HAVE_MC_H) */ VPUBLIC double Vacc_SASA(Vacc *thee, double radius ) { int i, natom; double area; //*apos; // gcc says unused Vatom *atom; VaccSurf *asurf; time_t ts; // PCE: temp ts = clock(); //unsigned long long mbeg; // gcc says unused natom = Valist_getNumberAtoms(thee->alist); /* Check to see if we need to build the surface */ if (thee->surf == VNULL) { thee->surf = Vmem_malloc(thee->mem, natom, sizeof(VaccSurf *)); #if defined(DEBUG_MAC_OSX_OCL) || defined(DEBUG_MAC_OSX_STANDARD) #include "mach_chud.h" machm_(&mbeg); #pragma omp parallel for private(i,atom) #endif for (i=0; i<natom; i++) { atom = Valist_getAtom(thee->alist, i); /* NOTE: RIGHT NOW WE DO THIS FOR THE ENTIRE MOLECULE WHICH IS * INCREDIBLY INEFFICIENT, PARTICULARLY DURING FOCUSING!!! */ thee->surf[i] = Vacc_atomSurf(thee, atom, thee->refSphere, radius); } } /* Calculate the area */ area = 0.0; for (i=0; i<natom; i++) { atom = Valist_getAtom(thee->alist, i); asurf = thee->surf[i]; /* See if this surface needs to be rebuilt */ if (asurf->probe_radius != radius) { Vnm_print(2, "Vacc_SASA: Warning -- probe radius changed from %g to %g!\n", asurf->probe_radius, radius); VaccSurf_dtor2(asurf); thee->surf[i] = Vacc_atomSurf(thee, atom, thee->refSphere, radius); asurf = thee->surf[i]; } area += (asurf->area); } #if defined(DEBUG_MAC_OSX_OCL) || defined(DEBUG_MAC_OSX_STANDARD) mets_(&mbeg, "Vacc_SASA - Parallel"); #endif Vnm_print(0, "Vacc_SASA: Time elapsed: %f\n", ((double)clock() - ts) / CLOCKS_PER_SEC); return area; } VPUBLIC double Vacc_totalSASA(Vacc *thee, double radius) { return Vacc_SASA(thee, radius); } VPUBLIC double Vacc_atomSASA(Vacc *thee, double radius, Vatom *atom) { VaccSurf *asurf; int id; if (thee->surf == VNULL) Vacc_SASA(thee, radius); id = Vatom_getAtomID(atom); asurf = thee->surf[id]; /* See if this surface needs to be rebuilt */ if (asurf->probe_radius != radius) { Vnm_print(2, "Vacc_SASA: Warning -- probe radius changed from %g to %g!\n", asurf->probe_radius, radius); VaccSurf_dtor2(asurf); thee->surf[id] = Vacc_atomSurf(thee, atom, thee->refSphere, radius); asurf = thee->surf[id]; } return asurf->area; } VPUBLIC VaccSurf* VaccSurf_ctor(Vmem *mem, double probe_radius, int nsphere) { VaccSurf *thee; //thee = Vmem_malloc(mem, 1, sizeof(Vacc) ); thee = (VaccSurf*)calloc(1,sizeof(Vacc)); VASSERT( VaccSurf_ctor2(thee, mem, probe_radius, nsphere) ); return thee; } VPUBLIC int VaccSurf_ctor2(VaccSurf *thee, Vmem *mem, double probe_radius, int nsphere) { if (thee == VNULL) return 0; thee->mem = mem; thee->npts = nsphere; thee->probe_radius = probe_radius; thee->area = 0.0; if (thee->npts > 0) { thee->xpts = Vmem_malloc(thee->mem, thee->npts, sizeof(double)); thee->ypts = Vmem_malloc(thee->mem, thee->npts, sizeof(double)); thee->zpts = Vmem_malloc(thee->mem, thee->npts, sizeof(double)); thee->bpts = Vmem_malloc(thee->mem, thee->npts, sizeof(char)); } else { thee->xpts = VNULL; thee->ypts = VNULL; thee->zpts = VNULL; thee->bpts = VNULL; } return 1; } VPUBLIC void VaccSurf_dtor(VaccSurf **thee) { Vmem *mem; if ((*thee) != VNULL) { mem = (*thee)->mem; VaccSurf_dtor2(*thee); //Vmem_free(mem, 1, sizeof(VaccSurf), (void **)thee); free(*thee); (*thee) = VNULL; } } VPUBLIC void VaccSurf_dtor2(VaccSurf *thee) { if (thee->npts > 0) { Vmem_free(thee->mem, thee->npts, sizeof(double), (void **)&(thee->xpts)); Vmem_free(thee->mem, thee->npts, sizeof(double), (void **)&(thee->ypts)); 
Vmem_free(thee->mem, thee->npts, sizeof(double), (void **)&(thee->zpts)); Vmem_free(thee->mem, thee->npts, sizeof(char), (void **)&(thee->bpts)); } } VPUBLIC VaccSurf* Vacc_atomSurf(Vacc *thee, Vatom *atom, VaccSurf *ref, double prad) { VaccSurf *surf; size_t i, j, npts; int atomID; double arad, rad, pos[3], *apos; /* Get atom information */ arad = Vatom_getRadius(atom); apos = Vatom_getPosition(atom); atomID = Vatom_getAtomID(atom); if (arad < VSMALL) { return VaccSurf_ctor(thee->mem, prad, 0); } rad = arad + prad; /* Determine which points will contribute */ npts = 0; for (i=0; i<ref->npts; i++) { /* Reset point flag: zero-radius atoms do not contribute */ pos[0] = rad*(ref->xpts[i]) + apos[0]; pos[1] = rad*(ref->ypts[i]) + apos[1]; pos[2] = rad*(ref->zpts[i]) + apos[2]; if (ivdwAccExclus(thee, pos, prad, atomID)) { npts++; ref->bpts[i] = (ref->bpts[i] << 1) + 1; } else { ref->bpts[i] <<= 1; } } /* Allocate space for the points */ surf = VaccSurf_ctor(thee->mem, prad, npts); /* Assign the points */ j = 0; for (i=0; i<ref->npts; i++) { char flag = ref->bpts[i] & 1; ref->bpts[i] >>= 1; if (flag) { surf->bpts[j] = 1; surf->xpts[j] = rad*(ref->xpts[i]) + apos[0]; surf->ypts[j] = rad*(ref->ypts[i]) + apos[1]; surf->zpts[j] = rad*(ref->zpts[i]) + apos[2]; j++; } } /* Assign the area */ surf->area = 4.0*VPI*rad*rad*((double)(surf->npts))/((double)(ref->npts)); return surf; } VPUBLIC VaccSurf* VaccSurf_refSphere(Vmem *mem, int npts) { VaccSurf *surf; int nactual, i, itheta, ntheta, iphi, nphimax, nphi; double frac; double sintheta, costheta, theta, dtheta; double sinphi, cosphi, phi, dphi; /* Setup "constants" */ frac = ((double)(npts))/4.0; ntheta = VRINT(VSQRT(Vunit_pi*frac)); dtheta = Vunit_pi/((double)(ntheta)); nphimax = 2*ntheta; /* Count the actual number of points to be used */ nactual = 0; for (itheta=0; itheta<ntheta; itheta++) { theta = dtheta*((double)(itheta)); sintheta = VSIN(theta); costheta = VCOS(theta); nphi = VRINT(sintheta*nphimax); nactual += nphi; } /* Allocate space for the points */ surf = VaccSurf_ctor(mem, 1.0, nactual); /* Clear out the boolean array */ for (i=0; i<nactual; i++) surf->bpts[i] = 1; /* Assign the points */ nactual = 0; for (itheta=0; itheta<ntheta; itheta++) { theta = dtheta*((double)(itheta)); sintheta = VSIN(theta); costheta = VCOS(theta); nphi = VRINT(sintheta*nphimax); if (nphi != 0) { dphi = 2*Vunit_pi/((double)(nphi)); for (iphi=0; iphi<nphi; iphi++) { phi = dphi*((double)(iphi)); sinphi = VSIN(phi); cosphi = VCOS(phi); surf->xpts[nactual] = cosphi * sintheta; surf->ypts[nactual] = sinphi * sintheta; surf->zpts[nactual] = costheta; nactual++; } } } surf->npts = nactual; return surf; } VPUBLIC VaccSurf* Vacc_atomSASPoints(Vacc *thee, double radius, Vatom *atom) { VaccSurf *asurf = VNULL; int id; if (thee->surf == VNULL) Vacc_SASA(thee, radius); id = Vatom_getAtomID(atom); asurf = thee->surf[id]; /* See if this surface needs to be rebuilt */ if (asurf->probe_radius != radius) { Vnm_print(2, "Vacc_SASA: Warning -- probe radius changed from %g to %g!\n", asurf->probe_radius, radius); VaccSurf_dtor2(asurf); thee->surf[id] = Vacc_atomSurf(thee, atom, thee->refSphere, radius); asurf = thee->surf[id]; } return asurf; } VPUBLIC void Vacc_splineAccGradAtomNorm4(Vacc *thee, double center[VAPBS_DIM], double win, double infrad, Vatom *atom, double *grad) { int i; double dist, *apos, arad, sm, sm2, sm3, sm4, sm5, sm6, sm7; double e, e2, e3, e4, e5, e6, e7; double b, b2, b3, b4, b5, b6, b7; double c0, c1, c2, c3, c4, c5, c6, c7; double denom, mygrad; double mychi 
= 1.0; /* Char. func. value for given atom */ VASSERT(thee != NULL); /* The grad is zero by default */ for (i=0; i<VAPBS_DIM; i++) grad[i] = 0.0; /* *** CALCULATE THE CHARACTERISTIC FUNCTION VALUE FOR THIS ATOM AND THE * *** MAGNITUDE OF THE FORCE *** */ apos = Vatom_getPosition(atom); /* Zero-radius atoms don't contribute */ if (Vatom_getRadius(atom) > 0.0) { arad = Vatom_getRadius(atom); arad = arad + infrad; b = arad - win; e = arad + win; e2 = e * e; e3 = e2 * e; e4 = e3 * e; e5 = e4 * e; e6 = e5 * e; e7 = e6 * e; b2 = b * b; b3 = b2 * b; b4 = b3 * b; b5 = b4 * b; b6 = b5 * b; b7 = b6 * b; denom = e7 - 7.0*b*e6 + 21.0*b2*e5 - 35.0*e4*b3 + 35.0*e3*b4 - 21.0*b5*e2 + 7.0*e*b6 - b7; c0 = b4*(35.0*e3 - 21.0*b*e2 + 7*e*b2 - b3)/denom; c1 = -140.0*b3*e3/denom; c2 = 210.0*e2*b2*(e + b)/denom; c3 = -140.0*e*b*(e2 + 3.0*b*e + b2)/denom; c4 = 35.0*(e3 + 9.0*b*e2 + + 9.0*e*b2 + b3)/denom; c5 = -84.0*(e2 + 3.0*b*e + b2)/denom; c6 = 70.0*(e + b)/denom; c7 = -20.0/denom; dist = VSQRT(VSQR(apos[0]-center[0]) + VSQR(apos[1]-center[1]) + VSQR(apos[2]-center[2])); /* If we're inside an atom, the entire characteristic function * will be zero and the grad will be zero, so we can stop */ if (dist < (arad - win)) return; /* Likewise, if we're outside the smoothing window, the characteristic * function is unity and the grad will be zero, so we can stop */ else if (dist > (arad + win)) return; /* Account for floating point error at the border * NAB: COULDN'T THESE TESTS BE COMBINED AS BELOW * (Vacc_splineAccAtom)? */ else if ((VABS(dist - (arad - win)) < VSMALL) || (VABS(dist - (arad + win)) < VSMALL)) return; /* If we're inside the smoothing window */ else { sm = dist; sm2 = sm * sm; sm3 = sm2 * sm; sm4 = sm3 * sm; sm5 = sm4 * sm; sm6 = sm5 * sm; sm7 = sm6 * sm; mychi = c0 + c1*sm + c2*sm2 + c3*sm3 + c4*sm4 + c5*sm5 + c6*sm6 + c7*sm7; mygrad = c1 + 2.0*c2*sm + 3.0*c3*sm2 + 4.0*c4*sm3 + 5.0*c5*sm4 + 6.0*c6*sm5 + 7.0*c7*sm6; if (mychi <= 0.0) { /* Avoid numerical round off errors */ return; } else if (mychi > 1.0) { /* Avoid numerical round off errors */ mychi = 1.0; } } /* Now assemble the grad vector */ VASSERT(mychi > 0.0); for (i=0; i<VAPBS_DIM; i++) grad[i] = -(mygrad/mychi)*((center[i] - apos[i])/dist); } } VPUBLIC void Vacc_splineAccGradAtomNorm3(Vacc *thee, double center[VAPBS_DIM], double win, double infrad, Vatom *atom, double *grad) { int i; double dist, *apos, arad, sm, sm2, sm3, sm4, sm5; double e, e2, e3, e4, e5; double b, b2, b3, b4, b5; double c0, c1, c2, c3, c4, c5; double denom, mygrad; double mychi = 1.0; /* Char. func. 
value for given atom */ VASSERT(thee != NULL); /* The grad is zero by default */ for (i=0; i<VAPBS_DIM; i++) grad[i] = 0.0; /* *** CALCULATE THE CHARACTERISTIC FUNCTION VALUE FOR THIS ATOM AND THE * *** MAGNITUDE OF THE FORCE *** */ apos = Vatom_getPosition(atom); /* Zero-radius atoms don't contribute */ if (Vatom_getRadius(atom) > 0.0) { arad = Vatom_getRadius(atom); arad = arad + infrad; b = arad - win; e = arad + win; e2 = e * e; e3 = e2 * e; e4 = e3 * e; e5 = e4 * e; b2 = b * b; b3 = b2 * b; b4 = b3 * b; b5 = b4 * b; denom = pow((e - b), 5.0); c0 = -10.0*e2*b3 + 5.0*e*b4 - b5; c1 = 30.0*e2*b2; c2 = -30.0*(e2*b + e*b2); c3 = 10.0*(e2 + 4.0*e*b + b2); c4 = -15.0*(e + b); c5 = 6; c0 = c0/denom; c1 = c1/denom; c2 = c2/denom; c3 = c3/denom; c4 = c4/denom; c5 = c5/denom; dist = VSQRT(VSQR(apos[0]-center[0]) + VSQR(apos[1]-center[1]) + VSQR(apos[2]-center[2])); /* If we're inside an atom, the entire characteristic function * will be zero and the grad will be zero, so we can stop */ if (dist < (arad - win)) return; /* Likewise, if we're outside the smoothing window, the characteristic * function is unity and the grad will be zero, so we can stop */ else if (dist > (arad + win)) return; /* Account for floating point error at the border * NAB: COULDN'T THESE TESTS BE COMBINED AS BELOW * (Vacc_splineAccAtom)? */ else if ((VABS(dist - (arad - win)) < VSMALL) || (VABS(dist - (arad + win)) < VSMALL)) return; /* If we're inside the smoothing window */ else { sm = dist; sm2 = sm * sm; sm3 = sm2 * sm; sm4 = sm3 * sm; sm5 = sm4 * sm; mychi = c0 + c1*sm + c2*sm2 + c3*sm3 + c4*sm4 + c5*sm5; mygrad = c1 + 2.0*c2*sm + 3.0*c3*sm2 + 4.0*c4*sm3 + 5.0*c5*sm4; if (mychi <= 0.0) { /* Avoid numerical round off errors */ return; } else if (mychi > 1.0) { /* Avoid numerical round off errors */ mychi = 1.0; } } /* Now assemble the grad vector */ VASSERT(mychi > 0.0); for (i=0; i<VAPBS_DIM; i++) grad[i] = -(mygrad/mychi)*((center[i] - apos[i])/dist); } } /* /////////////////////////////////////////////////////////////////////////// // Routine: Vacc_atomdSAV // // Purpose: Calculates the vector valued atomic derivative of volume // // Args: radius The radius of the solvent probe in Angstroms // iatom Index of the atom in thee->alist // // Author: Jason Wagoner // Nathan Baker (original FORTRAN routine from UHBD by Brock Luty) /////////////////////////////////////////////////////////////////////////// */ VPUBLIC void Vacc_atomdSAV(Vacc *thee, double srad, Vatom *atom, double *dSA ) { int ipt, iatom; double area; double *tPos, tRad, vec[3]; double dx,dy,dz; VaccSurf *ref; dx = 0.0; dy = 0.0; dz = 0.0; /* Get the atom information */ ref = thee->refSphere; iatom = Vatom_getAtomID(atom); dSA[0] = 0.0; dSA[1] = 0.0; dSA[2] = 0.0; tPos = Vatom_getPosition(atom); tRad = Vatom_getRadius(atom); if(tRad == 0.0) return; area = 4.0*VPI*(tRad+srad)*(tRad+srad)/((double)(ref->npts)); for (ipt=0; ipt<ref->npts; ipt++) { vec[0] = (tRad+srad)*ref->xpts[ipt] + tPos[0]; vec[1] = (tRad+srad)*ref->ypts[ipt] + tPos[1]; vec[2] = (tRad+srad)*ref->zpts[ipt] + tPos[2]; if (ivdwAccExclus(thee, vec, srad, iatom)) { dx = dx+vec[0]-tPos[0]; dy = dy+vec[1]-tPos[1]; dz = dz+vec[2]-tPos[2]; } } if ((tRad+srad) != 0){ dSA[0] = dx*area/(tRad+srad); dSA[1] = dy*area/(tRad+srad); dSA[2] = dz*area/(tRad+srad); } } /* Note: This is purely test code to make certain that the dSASA code is behaving properly. This function should NEVER be called by anyone other than an APBS developer at Wash U. 
*/ VPRIVATE double Vacc_SASAPos(Vacc *thee, double radius) { int i, natom; double area; Vatom *atom; VaccSurf *asurf; natom = Valist_getNumberAtoms(thee->alist); /* Calculate the area */ area = 0.0; for (i=0; i<natom; i++) { atom = Valist_getAtom(thee->alist, i); asurf = thee->surf[i]; VaccSurf_dtor2(asurf); thee->surf[i] = Vacc_atomSurf(thee, atom, thee->refSphere, radius); asurf = thee->surf[i]; area += (asurf->area); } return area; } VPRIVATE double Vacc_atomSASAPos(Vacc *thee, double radius, Vatom *atom, /* The atom being manipulated */ int mode ) { VaccSurf *asurf; int id; static int warned = 0; if ((thee->surf == VNULL) || (mode == 1)){ if(!warned){ Vnm_print(2, "WARNING: Recalculating entire surface!!!!\n"); warned = 1; } Vacc_SASAPos(thee, radius); // reinitialize before we can do anything about doing a calculation on a repositioned atom } id = Vatom_getAtomID(atom); asurf = thee->surf[id]; VaccSurf_dtor(&asurf); thee->surf[id] = Vacc_atomSurf(thee, atom, thee->refSphere, radius); asurf = thee->surf[id]; //printf("%s: Time elapsed: %f\n", __func__, ((double)clock() - ts) / CLOCKS_PER_SEC); return asurf->area; } /* /////////////////////////////////////////////////////////////////////////// // Routine: Vacc_atomdSASA // // Purpose: Calculates the derivative of surface area with respect to atomic // displacement using finite difference methods. // // Args: radius The radius of the solvent probe in Angstroms // iatom Index of the atom in thee->alist // // Author: Jason Wagoner // David Gohara // Nathan Baker (original FORTRAN routine from UHBD by Brock Luty) /////////////////////////////////////////////////////////////////////////// */ VPUBLIC void Vacc_atomdSASA(Vacc *thee, double dpos, double srad, Vatom *atom, double *dSA ) { double *temp_Pos, tPos[3], axb1, axt1, ayb1, ayt1, azb1, azt1; VaccSurf *ref; //printf("%s: entering\n", __func__); time_t ts; ts = clock(); /* Get the atom information */ ref = thee->refSphere; temp_Pos = Vatom_getPosition(atom); // Get a pointer to the position object. You actually manipulate the atom doing this... tPos[0] = temp_Pos[0]; tPos[1] = temp_Pos[1]; tPos[2] = temp_Pos[2]; /* Shift by pos -/+ on x */ temp_Pos[0] -= dpos; axb1 = Vacc_atomSASAPos(thee, srad, atom,0); temp_Pos[0] = tPos[0]; temp_Pos[0] += dpos; axt1 = Vacc_atomSASAPos(thee, srad, atom,0); temp_Pos[0] = tPos[0]; /* Shift by pos -/+ on y */ temp_Pos[1] -= dpos; ayb1 = Vacc_atomSASAPos(thee, srad, atom,0); temp_Pos[1] = tPos[1]; temp_Pos[1] += dpos; ayt1 = Vacc_atomSASAPos(thee, srad, atom,0); temp_Pos[1] = tPos[1]; /* Shift by pos -/+ on z */ temp_Pos[2] -= dpos; azb1 = Vacc_atomSASAPos(thee, srad, atom,0); temp_Pos[2] = tPos[2]; temp_Pos[2] += dpos; azt1 = Vacc_atomSASAPos(thee, srad, atom,0); temp_Pos[2] = tPos[2]; /* Reset the atom SASA to zero displacement */ Vacc_atomSASAPos(thee, srad, atom,0); /* Calculate the final value */ dSA[0] = (axt1-axb1)/(2.0 * dpos); dSA[1] = (ayt1-ayb1)/(2.0 * dpos); dSA[2] = (azt1-azb1)/(2.0 * dpos); } /* Note: This is purely test code to make certain that the dSASA code is behaving properly. This function should NEVER be called by anyone other than an APBS developer at Wash U. 
*/ VPUBLIC void Vacc_totalAtomdSASA(Vacc *thee, double dpos, double srad, Vatom *atom, double *dSA) { int iatom; double *temp_Pos, tRad; double tPos[3]; double axb1,axt1,ayb1,ayt1,azb1,azt1; VaccSurf *ref; /* Get the atom information */ ref = thee->refSphere; temp_Pos = Vatom_getPosition(atom); tRad = Vatom_getRadius(atom); iatom = Vatom_getAtomID(atom); dSA[0] = 0.0; dSA[1] = 0.0; dSA[2] = 0.0; tPos[0] = temp_Pos[0]; tPos[1] = temp_Pos[1]; tPos[2] = temp_Pos[2]; /* Shift by pos -/+ on x */ temp_Pos[0] -= dpos; axb1 = Vacc_atomSASAPos(thee, srad, atom, 1); temp_Pos[0] = tPos[0]; temp_Pos[0] += dpos; axt1 = Vacc_atomSASAPos(thee, srad, atom, 1); temp_Pos[0] = tPos[0]; /* Shift by pos -/+ on y */ temp_Pos[1] -= dpos; ayb1 = Vacc_atomSASAPos(thee, srad, atom, 1); temp_Pos[1] = tPos[1]; temp_Pos[1] += dpos; ayt1 = Vacc_atomSASAPos(thee, srad, atom, 1); temp_Pos[1] = tPos[1]; /* Shift by pos -/+ on z */ temp_Pos[2] -= dpos; azb1 = Vacc_atomSASAPos(thee, srad, atom, 1); temp_Pos[2] = tPos[2]; temp_Pos[2] += dpos; azt1 = Vacc_atomSASAPos(thee, srad, atom, 1); temp_Pos[2] = tPos[2]; /* Calculate the final value */ dSA[0] = (axt1-axb1)/(2.0 * dpos); dSA[1] = (ayt1-ayb1)/(2.0 * dpos); dSA[2] = (azt1-azb1)/(2.0 * dpos); } /* Note: This is purely test code to make certain that the dSASA code is behaving properly. This function should NEVER be called by anyone other than an APBS developer at Wash U. */ VPUBLIC void Vacc_totalAtomdSAV(Vacc *thee, double dpos, double srad, Vatom *atom, double *dSA, Vclist *clist) { int iatom; double *temp_Pos, tRad; double tPos[3]; double axb1,axt1,ayb1,ayt1,azb1,azt1; VaccSurf *ref; /* Get the atom information */ ref = thee->refSphere; temp_Pos = Vatom_getPosition(atom); tRad = Vatom_getRadius(atom); iatom = Vatom_getAtomID(atom); dSA[0] = 0.0; dSA[1] = 0.0; dSA[2] = 0.0; tPos[0] = temp_Pos[0]; tPos[1] = temp_Pos[1]; tPos[2] = temp_Pos[2]; /* Shift by pos -/+ on x */ temp_Pos[0] -= dpos; axb1 = Vacc_totalSAV(thee,clist, VNULL, srad); temp_Pos[0] = tPos[0]; temp_Pos[0] += dpos; axt1 = Vacc_totalSAV(thee,clist, VNULL, srad); temp_Pos[0] = tPos[0]; /* Shift by pos -/+ on y */ temp_Pos[1] -= dpos; ayb1 = Vacc_totalSAV(thee,clist, VNULL, srad); temp_Pos[1] = tPos[1]; temp_Pos[1] += dpos; ayt1 = Vacc_totalSAV(thee,clist, VNULL, srad); temp_Pos[1] = tPos[1]; /* Shift by pos -/+ on z */ temp_Pos[2] -= dpos; azb1 = Vacc_totalSAV(thee,clist, VNULL, srad); temp_Pos[2] = tPos[2]; temp_Pos[2] += dpos; azt1 = Vacc_totalSAV(thee,clist, VNULL, srad); temp_Pos[2] = tPos[2]; /* Calculate the final value */ dSA[0] = (axt1-axb1)/(2.0 * dpos); dSA[1] = (ayt1-ayb1)/(2.0 * dpos); dSA[2] = (azt1-azb1)/(2.0 * dpos); } VPUBLIC double Vacc_totalSAV(Vacc *thee, Vclist *clist, APOLparm *apolparm, double radius) { int i; int npts[3]; double spacs[3], vec[3]; double w, wx, wy, wz, len, fn, x, y, z, vol; double vol_density,sav; double *lower_corner, *upper_corner; sav = 0.0; vol = 1.0; vol_density = 2.0; lower_corner = clist->lower_corner; upper_corner = clist->upper_corner; for (i=0; i<3; i++) { len = upper_corner[i] - lower_corner[i]; vol *= len; fn = len*vol_density + 1; npts[i] = (int)ceil(fn); spacs[i] = len/((double)(npts[i])-1.0); if (apolparm != VNULL) { if (apolparm->setgrid) { if (apolparm->grid[i] > spacs[i]) { Vnm_print(2, "Vacc_totalSAV: Warning, your GRID value (%g) is larger than the recommended value (%g)!\n", apolparm->grid[i], spacs[i]); } spacs[i] = apolparm->grid[i]; } } } for (x=lower_corner[0]; x<=upper_corner[0]; x=x+spacs[0]) { if ( VABS(x - lower_corner[0]) < VSMALL) { wx = 
0.5; } else if ( VABS(x - upper_corner[0]) < VSMALL) { wx = 0.5; } else { wx = 1.0; } vec[0] = x; for (y=lower_corner[1]; y<=upper_corner[1]; y=y+spacs[1]) { if ( VABS(y - lower_corner[1]) < VSMALL) { wy = 0.5; } else if ( VABS(y - upper_corner[1]) < VSMALL) { wy = 0.5; } else { wy = 1.0; } vec[1] = y; for (z=lower_corner[2]; z<=upper_corner[2]; z=z+spacs[2]) { if ( VABS(z - lower_corner[2]) < VSMALL) { wz = 0.5; } else if ( VABS(z - upper_corner[2]) < VSMALL) { wz = 0.5; } else { wz = 1.0; } vec[2] = z; w = wx*wy*wz; sav += (w*(1.0-Vacc_ivdwAcc(thee, vec, radius))); } /* z loop */ } /* y loop */ } /* x loop */ w = spacs[0]*spacs[1]*spacs[2]; sav *= w; return sav; } int Vacc_wcaEnergyAtom(Vacc *thee, APOLparm *apolparm, Valist *alist, Vclist *clist, int iatom, double *value) { int i; int npts[3]; int pad = 14; int xmin, ymin, zmin; int xmax, ymax, zmax; double sigma6, sigma12; double spacs[3], vec[3]; double w, wx, wy, wz, len, fn, x, y, z, vol; double x2,y2,z2,r; double vol_density, energy, rho, srad; double psig, epsilon, watepsilon, sigma, watsigma, eni, chi; double *pos; double *lower_corner, *upper_corner; Vatom *atom = VNULL; VASSERT(apolparm != VNULL); energy = 0.0; vol = 1.0; vol_density = 2.0; lower_corner = clist->lower_corner; upper_corner = clist->upper_corner; atom = Valist_getAtom(alist, iatom); pos = Vatom_getPosition(atom); /* Note: these are the original temporary water parameters... they have been replaced by entries in a parameter file: watsigma = 1.7683; watepsilon = 0.1521; watepsilon = watepsilon*4.184; */ srad = apolparm->srad; rho = apolparm->bconc; watsigma = apolparm->watsigma; watepsilon = apolparm->watepsilon; psig = atom->radius; epsilon = atom->epsilon; sigma = psig + watsigma; epsilon = VSQRT((epsilon * watepsilon)); /* parameters */ sigma6 = VPOW(sigma,6); sigma12 = VPOW(sigma,12); /* OPLS-style radius: double sigmar = sigma*VPOW(2, (1.0/6.0)); */ xmin = pos[0] - pad; xmax = pos[0] + pad; ymin = pos[1] - pad; ymax = pos[1] + pad; zmin = pos[2] - pad; zmax = pos[2] + pad; for (i=0; i<3; i++) { len = (upper_corner[i] + pad) - (lower_corner[i] - pad); vol *= len; fn = len*vol_density + 1; npts[i] = (int)ceil(fn); spacs[i] = 0.5; if (apolparm->setgrid) { if (apolparm->grid[i] > spacs[i]) { Vnm_print(2, "Vacc_totalSAV: Warning, your GRID value (%g) is larger than the recommended value (%g)!\n", apolparm->grid[i], spacs[i]); } spacs[i] = apolparm->grid[i]; } } for (x=xmin; x<=xmax; x=x+spacs[0]) { if ( VABS(x - xmin) < VSMALL) { wx = 0.5; } else if ( VABS(x - xmax) < VSMALL) { wx = 0.5; } else { wx = 1.0; } vec[0] = x; for (y=ymin; y<=ymax; y=y+spacs[1]) { if ( VABS(y - ymin) < VSMALL) { wy = 0.5; } else if ( VABS(y - ymax) < VSMALL) { wy = 0.5; } else { wy = 1.0; } vec[1] = y; for (z=zmin; z<=zmax; z=z+spacs[2]) { if ( VABS(z - zmin) < VSMALL) { wz = 0.5; } else if ( VABS(z - zmax) < VSMALL) { wz = 0.5; } else { wz = 1.0; } vec[2] = z; w = wx*wy*wz; chi = Vacc_ivdwAcc(thee, vec, srad); if (VABS(chi) > VSMALL) { x2 = VSQR(vec[0]-pos[0]); y2 = VSQR(vec[1]-pos[1]); z2 = VSQR(vec[2]-pos[2]); r = VSQRT(x2+y2+z2); if (r <= 14 && r >= sigma) { eni = chi*rho*epsilon*(-2.0*sigma6/VPOW(r,6)+sigma12/VPOW(r,12)); }else if (r <= 14){ eni = -1.0*epsilon*chi*rho; }else{ eni = 0.0; } }else{ eni = 0.0; } energy += eni*w; } /* z loop */ } /* y loop */ } /* x loop */ w = spacs[0]*spacs[1]*spacs[2]; energy *= w; *value = energy; return VRC_SUCCESS; } VPUBLIC int Vacc_wcaEnergy(Vacc *acc, APOLparm *apolparm, Valist *alist, Vclist *clist){ int iatom; int rc = 0; double energy = 0.0; 
double tenergy = 0.0; double rho = apolparm->bconc; /* Do a sanity check to make sure that watepsilon and watsigma are set * If not, return with an error. */ if(apolparm->setwat == 0){ Vnm_print(2,"Vacc_wcaEnergy: Error. No value was set for watsigma and watepsilon.\n"); return VRC_FAILURE; } if (VABS(rho) < VSMALL) { apolparm->wcaEnergy = tenergy; return 1; } for (iatom=0; iatom<Valist_getNumberAtoms(alist); iatom++){ rc = Vacc_wcaEnergyAtom(acc, apolparm, alist, clist, iatom, &energy); if(rc == 0) return 0; tenergy += energy; } apolparm->wcaEnergy = tenergy; return VRC_SUCCESS; } VPUBLIC int Vacc_wcaForceAtom(Vacc *thee, APOLparm *apolparm, Vclist *clist, Vatom *atom, double *force ){ int i, si, npts[3], pad = 14, xmin, ymin, zmin, xmax, ymax, zmax; double sigma6, sigma12, spacs[3], vec[3], fpt[3], w, wx, wy, wz, len, fn, x, y, z, vol, x2, y2, z2, r, vol_density, fo, rho, srad, psig, epsilon, watepsilon, sigma, watsigma, chi, *pos, *lower_corner, *upper_corner; /* Allocate needed variables now that we've asserted required conditions. */ time_t ts; ts = clock(); VASSERT(apolparm != VNULL); /* Do a sanity check to make sure that watepsilon and watsigma are set * If not, return with an error. */ if(apolparm->setwat == 0){ Vnm_print(2,"Vacc_wcaEnergy: Error. No value was set for watsigma and watepsilon.\n"); return VRC_FAILURE; } vol = 1.0; vol_density = 2.0; lower_corner = clist->lower_corner; upper_corner = clist->upper_corner; pos = Vatom_getPosition(atom); srad = apolparm->srad; rho = apolparm->bconc; watsigma = apolparm->watsigma; watepsilon = apolparm->watepsilon; psig = atom->radius; epsilon = atom->epsilon; sigma = psig + watsigma; epsilon = VSQRT((epsilon * watepsilon)); /* parameters */ sigma6 = VPOW(sigma,6); sigma12 = VPOW(sigma,12); /* OPLS-style radius: double sigmar = sigma*VPOW(2, (1.0/6.0)); */ for (i=0; i<3; i++) { len = (upper_corner[i] + pad) - (lower_corner[i] - pad); vol *= len; fn = len*vol_density + 1; npts[i] = (int)ceil(fn); spacs[i] = 0.5; force[i] = 0.0; if (apolparm->setgrid) { if (apolparm->grid[i] > spacs[i]) { Vnm_print(2, "Vacc_totalSAV: Warning, your GRID value (%g) is larger than the recommended value (%g)!\n", apolparm->grid[i], spacs[i]); } spacs[i] = apolparm->grid[i]; } } xmin = pos[0] - pad; xmax = pos[0] + pad; ymin = pos[1] - pad; ymax = pos[1] + pad; zmin = pos[2] - pad; zmax = pos[2] + pad; for (x=xmin; x<=xmax; x=x+spacs[0]) { if ( VABS(x - xmin) < VSMALL) { wx = 0.5; } else if ( VABS(x - xmax) < VSMALL) { wx = 0.5; } else { wx = 1.0; } vec[0] = x; for (y=ymin; y<=ymax; y=y+spacs[1]) { if ( VABS(y - ymin) < VSMALL) { wy = 0.5; } else if ( VABS(y - ymax) < VSMALL) { wy = 0.5; } else { wy = 1.0; } vec[1] = y; for (z=zmin; z<=zmax; z=z+spacs[2]) { if ( VABS(z - zmin) < VSMALL) { wz = 0.5; } else if ( VABS(z - zmax) < VSMALL) { wz = 0.5; } else { wz = 1.0; } vec[2] = z; w = wx*wy*wz; chi = Vacc_ivdwAcc(thee, vec, srad); if (chi != 0.0) { x2 = VSQR(vec[0]-pos[0]); y2 = VSQR(vec[1]-pos[1]); z2 = VSQR(vec[2]-pos[2]); r = VSQRT(x2+y2+z2); if (r <= 14 && r >= sigma){ fo = 12.0*chi*rho*epsilon*(sigma6/VPOW(r,7)-sigma12/VPOW(r,13)); fpt[0] = -1.0*(pos[0]-vec[0])*fo/r; fpt[1] = -1.0*(pos[1]-vec[1])*fo/r; fpt[2] = -1.0*(pos[2]-vec[2])*fo/r; }else { for (si=0; si < 3; si++) fpt[si] = 0.0; } }else { for (si=0; si < 3; si++) fpt[si] = 0.0; } for(i=0;i<3;i++){ force[i] += (w*fpt[i]); } } /* z loop */ } /* y loop */ } /* x loop */ w = spacs[0]*spacs[1]*spacs[2]; for(i=0;i<3;i++) force[i] *= w; return VRC_SUCCESS; }
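/* A standalone restatement of the single-atom spline characteristic function
 * used by Vacc_splineAccAtom above, for a point at distance dist from an atom
 * with inflated radius arad and smoothing window win: chi = 0 inside
 * (arad - win), 1 outside (arad + win), and the cubic
 * 0.75*sm^2/win^2 - 0.25*sm^3/win^3 with sm = dist - arad + win in between.
 * Minimal sketch for illustration only; it omits the VSMALL border handling
 * of the full routine. */
#include <math.h>

static double spline_chi(double dist, double arad, double win)
{
    if (dist <= arad - win) return 0.0;   /* fully inside the inflated atom */
    if (dist >= arad + win) return 1.0;   /* outside the smoothing window */
    double sm  = dist - arad + win;       /* 0 <= sm <= 2*win */
    double w2i = 1.0 / (win * win);
    double w3i = w2i / win;
    return 0.75 * sm * sm * w2i - 0.25 * sm * sm * sm * w3i;
}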
task1.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>

#define ITERATIONS 200000000

/* compiled with:
   gcc -std=c99 -fopenmp -O3 -o task1 task1.c
   (module load gcc/8.2.0)

   run as a job with:
   export OMP_NUM_THREADS=4
   export OMP_PLACES=cores
   export OMP_PROC_BIND=close
   ./task1
   export OMP_PROC_BIND=spread
   ./task1
   export OMP_PROC_BIND=master
   ./task1
*/

int main(void){
    const int threads_num = 4;
    //printf("Total threads: %d\n", threads_num);
    int inc = 0;
    double wtime = omp_get_wtime();
    #pragma omp parallel num_threads(threads_num)
    {
        #pragma omp for
        for (long i = 0; i < ITERATIONS; i++) {
            #pragma omp atomic
            inc++;
        }
        int threadnum = omp_get_thread_num();
        printf("Thread: %d\n", threadnum);
    }
    wtime = omp_get_wtime() - wtime;
    printf("Increment is at %d after %d iterations.\n", inc, ITERATIONS);
    printf("Time taken %f\n", wtime);
}
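/* For comparison with the atomic increment above: a reduction avoids the
 * per-iteration synchronization entirely and is the idiomatic OpenMP way to
 * sum a counter across threads.  A sketch only, under the same assumptions
 * as task1.c (the count fits in an int). */
#include <omp.h>

static int count_with_reduction(long iterations)
{
    int inc = 0;
    #pragma omp parallel for reduction(+:inc)
    for (long i = 0; i < iterations; i++)
        inc++;  /* each thread sums privately; partial sums are combined once */
    return inc;
}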
softmax_arm_func.h
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifdef SOFTMAX_OP #pragma once #include "../../math/softmax.h" #include "operators/op_param.h" namespace paddle_mobile { namespace operators { void softmax_basic_axis_float(const float *din, float *dout, const int axis_size, const int inner_num, const int outer_num) { int compute_size = inner_num * outer_num; #pragma omp parallel for for (int i = 0; i < compute_size; ++i) { int idx_inner = i % inner_num; int idx_outer = (i / inner_num) * axis_size; int real_index = idx_outer * inner_num + idx_inner; float max_data = din[real_index]; // get max for (int j = 1; j < axis_size; ++j) { real_index += inner_num; max_data = din[real_index] > max_data ? din[real_index] : max_data; } real_index = idx_outer * inner_num + idx_inner; // sub, exp and sum dout[real_index] = expf(din[real_index] - max_data); float sum_data = dout[real_index]; for (int j = 1; j < axis_size; ++j) { real_index += inner_num; dout[real_index] = expf(din[real_index] - max_data); sum_data += dout[real_index]; } float sum_inv = 1.f / sum_data; real_index = idx_outer * inner_num + idx_inner; // get softmax result for (int j = 0; j < axis_size; ++j) { dout[real_index] *= sum_inv; real_index += inner_num; } } } template <typename P> void SoftmaxCompute(const SoftmaxParam<CPU> &param) { const Tensor *in_x = param.InputX(); Tensor *out = param.Out(); auto x_dims = in_x->dims(); out->Resize(x_dims); out->mutable_data<float>(); if (param.has_axis_) { int axis = param.axis_; int axis_size = x_dims[axis]; auto x_rank = x_dims.size(); DLOG << "x_rank :" << x_rank; if (axis < 0) { axis += x_rank; } DLOG << "axis :" << axis; int outer_num = framework::product(framework::slice_ddim(x_dims, 0, axis)); DLOG << "outer_num :" << outer_num; int inner_num = framework::product(framework::slice_ddim(x_dims, axis + 1, x_rank)); DLOG << "inner_num :" << inner_num; softmax_basic_axis_float(in_x->data<float>(), out->data<float>(), axis_size, inner_num, outer_num); } else { math::SoftmaxFuntor<CPU, float>()(in_x, out); } } } // namespace operators } // namespace paddle_mobile #endif
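/* The inner_num/outer_num indexing in softmax_basic_axis_float reduces, for a
 * contiguous last axis (inner_num == 1), to the classic three-pass numerically
 * stable softmax: subtract the max, exponentiate and accumulate, then scale by
 * 1/sum.  A minimal C sketch of that base case, assuming din and dout are
 * valid length-n buffers: */
#include <math.h>

static void softmax_lastaxis(const float *din, float *dout, int n)
{
    float max_data = din[0];
    for (int j = 1; j < n; ++j)            /* pass 1: running max */
        if (din[j] > max_data) max_data = din[j];
    float sum = 0.f;
    for (int j = 0; j < n; ++j) {          /* pass 2: exp and sum */
        dout[j] = expf(din[j] - max_data);
        sum += dout[j];
    }
    float sum_inv = 1.f / sum;
    for (int j = 0; j < n; ++j)            /* pass 3: normalize */
        dout[j] *= sum_inv;
}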
wick.c
/* Copyright 2021 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "wick.h" #include <stdbool.h> #include <assert.h> /* C-kernel for filling custom RDMs using particle RDMs. */ int wickfill(double complex *target, const double complex *source, const uint32_t *indices, const double factor, const uint32_t *delta, const int norb, const int trank, const int srank) { if (srank == 0 && trank == 1) { #pragma omp parallel for schedule(static) for (int i = 0; i < norb; ++i) { target[i + norb * i] += factor; } } else if (srank == 1 && trank == 1) { #pragma omp parallel for schedule(static) for (int i = 0; i < norb; ++i) { for (int j = 0; j != norb; ++j) { const int mat[2] = {i, j}; target[j + norb * i] += factor * source[mat[indices[1]] + norb * mat[indices[0]]]; } } } else if (srank == 0 && trank == 2) { #pragma omp parallel for schedule(static) for (int ij = 0; ij < norb * norb; ++ij) { const int i = ij / norb; const int j = ij % norb; for (int k = 0; k != norb; ++k) { for (int l = 0; l != norb; ++l) { const int mat[4] = {i, j, k, l}; if (mat[delta[0 * 2 + 0]] == mat[delta[0 * 2 + 1]] && mat[delta[1 * 2 + 0]] == mat[delta[1 * 2 + 1]]) { target[l + norb * (k + norb * (j + norb * i))] += factor; } } } } } else if (srank == 1 && trank == 2) { #pragma omp parallel for schedule(static) for (int ij = 0; ij < norb * norb; ++ij) { const int i = ij / norb; const int j = ij % norb; for (int k = 0; k != norb; ++k) { for (int l = 0; l != norb; ++l) { const int mat[4] = {i, j, k, l}; if (mat[delta[0 * 2 + 0]] == mat[delta[0 * 2 + 1]]) { target[l + norb * (k + norb * (j + norb * i))] += factor * source[mat[indices[1]] + norb * mat[indices[0]]]; } } } } } else if (srank == 2 && trank == 2) { #pragma omp parallel for schedule(static) for (int ij = 0; ij < norb * norb; ++ij) { const int i = ij / norb; const int j = ij % norb; for (int k = 0; k != norb; ++k) { for (int l = 0; l != norb; ++l) { const int mat[4] = {i, j, k, l}; target[l + norb * (k + norb * (j + norb * i))] += factor * source[mat[indices[3]] + norb * (mat[indices[2]] + norb * (mat[indices[1]] + norb * mat[indices[0]]))]; } } } } else if (srank == 0 && trank == 3) { #pragma omp parallel for schedule(static) for (int i = 0; i < norb; ++i) { for (int j = 0; j != norb; ++j) { for (int k = 0; k != norb; ++k) { for (int l = 0; l != norb; ++l) { for (int o = 0; o != norb; ++o) { for (int p = 0; p != norb; ++p) { const int mat[6] = {i, j, k, l, o, p}; if (mat[delta[0 * 2 + 0]] == mat[delta[0 * 2 + 1]] && mat[delta[1 * 2 + 0]] == mat[delta[1 * 2 + 1]] && mat[delta[2 * 2 + 0]] == mat[delta[2 * 2 + 1]]) { target[p + norb * (o + norb * (l + norb * (k + norb * (j + norb * i))))] += factor; } } } } } } } } else if (srank == 1 && trank == 3) { #pragma omp parallel for schedule(static) for (int ijk = 0; ijk < norb * norb * norb; ++ijk) { const int i = ijk / (norb * norb); const int j = (ijk % (norb * norb)) / norb; const int k = (ijk % (norb * norb)) % norb; for (int l = 0; l != norb; ++l) { for (int o = 0; o != norb; ++o) { for (int p = 0; p != norb; ++p) { const int mat[6] = {i, j, 
k, l, o, p}; if (mat[delta[0 * 2 + 0]] == mat[delta[0 * 2 + 1]] && mat[delta[1 * 2 + 0]] == mat[delta[1 * 2 + 1]]) { target[p + norb * (o + norb * (l + norb * (k + norb * (j + norb * i))))] += factor * source[mat[indices[1]] + norb * mat[indices[0]]]; } } } } } } else if (srank == 2 && trank == 3) { #pragma omp parallel for schedule(static) for (int ijk = 0; ijk < norb * norb * norb; ++ijk) { const int i = ijk / (norb * norb); const int j = (ijk % (norb * norb)) / norb; const int k = (ijk % (norb * norb)) % norb; for (int l = 0; l != norb; ++l) { for (int o = 0; o != norb; ++o) { for (int p = 0; p != norb; ++p) { const int mat[6] = {i, j, k, l, o, p}; if (mat[delta[0 * 2 + 0]] == mat[delta[0 * 2 + 1]]) { target[p + norb * (o + norb * (l + norb * (k + norb * (j + norb * i))))] += factor * source[mat[indices[3]] + norb * (mat[indices[2]] + norb * (mat[indices[1]] + norb * mat[indices[0]]))]; } } } } } } else if (srank == 3 && trank == 3) { #pragma omp parallel for schedule(static) for (int ijk = 0; ijk < norb * norb * norb; ++ijk) { const int i = ijk / (norb * norb); const int j = (ijk % (norb * norb)) / norb; const int k = (ijk % (norb * norb)) % norb; for (int l = 0; l != norb; ++l) { for (int o = 0; o != norb; ++o) { for (int p = 0; p != norb; ++p) { const int mat[6] = {i, j, k, l, o, p}; target[p + norb * (o + norb * (l + norb * (k + norb * (j + norb * i))))] += factor * source[mat[indices[5]] + norb * (mat[indices[4]] + norb * (mat[indices[3]] + norb * (mat[indices[2]] + norb * (mat[indices[1]] + norb * mat[indices[0]]))))]; } } } } } else if (srank == 0 && trank == 4) { #pragma omp parallel for schedule(static) for (int ijkl = 0; ijkl < norb * norb * norb * norb; ++ijkl) { const int i = ijkl / (norb * norb * norb); const int jkl = ijkl % (norb * norb * norb); const int j = jkl / (norb * norb); const int kl = jkl % (norb * norb); const int k = kl / norb; const int l = kl % norb; for (int o = 0; o != norb; ++o) { for (int p = 0; p != norb; ++p) { for (int q = 0; q != norb; ++q) { for (int r = 0; r != norb; ++r) { const int mat[8] = {i, j, k, l, o, p, q, r}; if (mat[delta[0 * 2 + 0]] == mat[delta[0 * 2 + 1]] && mat[delta[1 * 2 + 0]] == mat[delta[1 * 2 + 1]] && mat[delta[2 * 2 + 0]] == mat[delta[2 * 2 + 1]] && mat[delta[3 * 2 + 0]] == mat[delta[3 * 2 + 1]]) { target[r + norb * (q + norb * (p + norb * (o + norb * (l + norb * (k + norb * (j + norb * i))))))] += factor; } } } } } } } else if (srank == 1 && trank == 4) { #pragma omp parallel for schedule(static) for (int ijkl = 0; ijkl < norb * norb * norb * norb; ++ijkl) { const int i = ijkl / (norb * norb * norb); const int jkl = ijkl % (norb * norb * norb); const int j = jkl / (norb * norb); const int kl = jkl % (norb * norb); const int k = kl / norb; const int l = kl % norb; for (int o = 0; o != norb; ++o) { for (int p = 0; p != norb; ++p) { for (int q = 0; q != norb; ++q) { for (int r = 0; r != norb; ++r) { const int mat[8] = {i, j, k, l, o, p, q, r}; if (mat[delta[0 * 2 + 0]] == mat[delta[0 * 2 + 1]] && mat[delta[1 * 2 + 0]] == mat[delta[1 * 2 + 1]] && mat[delta[2 * 2 + 0]] == mat[delta[2 * 2 + 1]]) { target[r + norb * (q + norb * (p + norb * (o + norb * (l + norb * (k + norb * (j + norb * i))))))] += factor * source[mat[indices[1]] + norb * mat[indices[0]]]; } } } } } } } else if (srank == 2 && trank == 4) { #pragma omp parallel for schedule(static) for (int ijkl = 0; ijkl < norb * norb * norb * norb; ++ijkl) { const int i = ijkl / (norb * norb * norb); const int jkl = ijkl % (norb * norb * norb); const int j = jkl / (norb * norb); 
const int kl = jkl % (norb * norb); const int k = kl / norb; const int l = kl % norb; for (int o = 0; o != norb; ++o) { for (int p = 0; p != norb; ++p) { for (int q = 0; q != norb; ++q) { for (int r = 0; r != norb; ++r) { const int mat[8] = {i, j, k, l, o, p, q, r}; if (mat[delta[0 * 2 + 0]] == mat[delta[0 * 2 + 1]] && mat[delta[1 * 2 + 0]] == mat[delta[1 * 2 + 1]]) { target[r + norb * (q + norb * (p + norb * (o + norb * (l + norb * (k + norb * (j + norb * i))))))] += factor * source[mat[indices[3]] + norb * (mat[indices[2]] + norb * (mat[indices[1]] + norb * mat[indices[0]]))]; } } } } } } } else if (srank == 3 && trank == 4) { #pragma omp parallel for schedule(static) for (int ijkl = 0; ijkl < norb * norb * norb * norb; ++ijkl) { const int i = ijkl / (norb * norb * norb); const int jkl = ijkl % (norb * norb * norb); const int j = jkl / (norb * norb); const int kl = jkl % (norb * norb); const int k = kl / norb; const int l = kl % norb; for (int o = 0; o != norb; ++o) { for (int p = 0; p != norb; ++p) { for (int q = 0; q != norb; ++q) { for (int r = 0; r != norb; ++r) { const int mat[8] = {i, j, k, l, o, p, q, r}; if (mat[delta[0 * 2 + 0]] == mat[delta[0 * 2 + 1]]) { target[r + norb * (q + norb * (p + norb * (o + norb * (l + norb * (k + norb * (j + norb * i))))))] += factor * source[mat[indices[5]] + norb * (mat[indices[4]] + norb * (mat[indices[3]] + norb * (mat[indices[2]] + norb * (mat[indices[1]] + norb * mat[indices[0]]))))]; } } } } } } } else if (srank == 4 && trank == 4) { #pragma omp parallel for schedule(static) for (int ijkl = 0; ijkl < norb * norb * norb * norb; ++ijkl) { const int i = ijkl / (norb * norb * norb); const int jkl = ijkl % (norb * norb * norb); const int j = jkl / (norb * norb); const int kl = jkl % (norb * norb); const int k = kl / norb; const int l = kl % norb; for (int o = 0; o != norb; ++o) { for (int p = 0; p != norb; ++p) { for (int q = 0; q != norb; ++q) { for (int r = 0; r != norb; ++r) { const int mat[8] = {i, j, k, l, o, p, q, r}; target[r + norb * (q + norb * (p + norb * (o + norb * (l + norb * (k + norb * (j + norb * i))))))] += factor * source[mat[indices[7]] + norb * (mat[indices[6]] + norb * (mat[indices[5]] + norb * (mat[indices[4]] + norb * (mat[indices[3]] + norb * (mat[indices[2]] + norb * (mat[indices[1]] + norb * mat[indices[0]]))))))]; } } } } } } else { assert(false); } return 0; }
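/* A minimal driver for the srank == 1, trank == 1 branch of wickfill above:
 * with indices = {0, 1} the index map mat[] is the identity, so the call
 * accumulates factor * source into target element-wise (delta is unused on
 * this path and may be NULL).  Sketch only, assuming wick.h is on the
 * include path. */
#include <complex.h>
#include <stdint.h>
#include "wick.h"

static void accumulate_rdm1(double complex *target, const double complex *rdm1,
                            double factor, int norb)
{
    const uint32_t identity[2] = {0, 1};
    wickfill(target, rdm1, identity, factor, NULL, norb, 1, 1);
}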
defaultmap-3.c
/* { dg-do compile } */

struct S { int s; };
void foo (char *);
void bar (int, char *, struct S, int *);
#pragma omp declare target to (bar)
#define N 16

void
f1 (int sc1, struct S ag1, int *pt1)
{
  char ar1[N];
  foo (ar1);
  #pragma omp target defaultmap(default:scalar) defaultmap(to:aggregate) defaultmap(none:pointer)	/* { dg-message "note: enclosing 'target'" } */
  bar (sc1, ar1, ag1, pt1);	/* { dg-error "'pt1' not specified in enclosing 'target'" } */
}

void
f2 (int sc2, struct S ag2, int *pt2)
{
  char ar2[N];
  foo (ar2);
  #pragma omp target defaultmap(none:scalar) defaultmap(from:aggregate) defaultmap(default:pointer)	/* { dg-message "note: enclosing 'target'" } */
  bar (sc2, ar2, ag2, pt2);	/* { dg-error "'sc2' not specified in enclosing 'target'" } */
}

void
f3 (int sc3, struct S ag3, int *pt3)
{
  char ar3[N];
  foo (ar3);
  #pragma omp target defaultmap(firstprivate:scalar) defaultmap(none:aggregate) defaultmap(to:pointer)	/* { dg-message "note: enclosing 'target'" } */
  bar (sc3, ar3, ag3, pt3);	/* { dg-error "'ar3' not specified in enclosing 'target'" } */
  /* { dg-error "'ag3' not specified in enclosing 'target'" "" { target *-*-* } .-1 } */
}
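/* Illustrative sketch (not part of the original testsuite file): once a
 * category is defaultmap(none:...), listing the variable explicitly resolves
 * the error -- firstprivate covers the scalar, map clauses cover the
 * aggregates, and an array section attaches the pointer. f4 is hypothetical
 * and assumed to compile cleanly, so it carries no dg-error directives. */
void
f4 (int sc4, struct S ag4, int *pt4)
{
  char ar4[N];
  foo (ar4);
  #pragma omp target defaultmap(none:scalar) defaultmap(none:aggregate) \
                     defaultmap(none:pointer) firstprivate(sc4) \
                     map(to: ar4, ag4) map(tofrom: pt4[0:1])
  bar (sc4, ar4, ag4, pt4);
}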
opencl_odf_fmt_plug.c
/* Modified by Dhiru Kholia <dhiru at openwall.com> for ODF Blowfish format.
 *
 * This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net>
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted. */

#ifdef HAVE_OPENCL

#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_odf;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_odf);
#else

#include <string.h>
#include "sha.h"
#include <openssl/blowfish.h>
#include <openssl/aes.h>
#ifdef _OPENMP
#include <omp.h>
#endif

#include "arch.h"
#include "formats.h"
#include "common.h"
#include "misc.h"
#include "options.h"
#include "common-opencl.h"

#define FORMAT_LABEL        "ODF-opencl"
#define FORMAT_NAME         ""
#define ALGORITHM_NAME      "SHA1 OpenCL Blowfish"
#define BENCHMARK_COMMENT   ""
#define BENCHMARK_LENGTH    -1
#define MIN_KEYS_PER_CRYPT  1
#define MAX_KEYS_PER_CRYPT  1
#define BINARY_SIZE         20
#define PLAINTEXT_LENGTH    64
#define SALT_SIZE           sizeof(odf_cpu_salt)
#define BINARY_ALIGN        MEM_ALIGN_WORD
#define SALT_ALIGN          MEM_ALIGN_WORD

#define uint8_t             unsigned char
#define uint16_t            unsigned short
#define uint32_t            unsigned int

typedef struct {
    uint32_t length;
    uint8_t v[20];  // hash of password
} odf_password;

typedef struct {
    uint32_t v[32/4];
} odf_hash;

typedef struct {
    uint8_t length;
    uint8_t salt[64];
    uint32_t iterations;
    uint32_t outlen;
} odf_salt;

static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[32 / sizeof(ARCH_WORD_32)];

typedef struct {
    int cipher_type;
    int checksum_type;
    int iterations;
    int key_size;
    int iv_length;
    int salt_length;
    int content_length;
    unsigned char iv[16];
    unsigned char salt[32];
    unsigned char content[1024];
} odf_cpu_salt;

static odf_cpu_salt *cur_salt;

static struct fmt_tests odf_tests[] = {
{"$odf$*0*0*1024*16*df6c10f64d191a841812af53874b636d014ce3fe*8*07e28aff39d2660e*16*b124be9f3346fb77e0ebcc3bb80028f8*0*2276a1077f6a2a027bd565ce89824d6a20086e378876be05c4b8e3796a460e828c9803a692caf7a53492c220d1d7ecbf4e2d336c7abf5a7672acc804ca267318252cbc13676616d1fde38820f9fbeef1360067d9de096ba8c1032ae947bde1d0fedaf37b6020663d49faf36b7c095c5b9aae11c8fc2be74148f008edbdbb180b44028ad8259f1215b483542bf3027f56dee5f962448333b30f88e6ae4790b60d24abb286edff9adee831a4b3351fc47259043f0d683d7a25be7e47aff3aedca140005d866e218c8efcca32093c19bbece50bd96656d0f94a712d3c60d1e5342db86482fc73f05faf513ca0b137378126597b95986c372b412c953e97011259aab0839fe453c756559497a28ba88dce009e1e7980436131029d38e56a34f608e6471970d9959068808c898608024db9eb394c4feae7a364ea9272ec4ea2315a9f0407a4b27d5e49a8ab1e3ddce5c84927d5aecd7e68e4437a820ea8743c6b5b4e2abbb47b0001e2f77ceac4603e8774e4ccbc1adde794428c11ae4a7492727b620334302e63f72b0c06c1cf83800366916ee8295176819272d557863a831ee0a576841191482959aad69095831fa1d64e3e0e6f6c6a751bcdadf0fbaa27a17458709f708c04587cb208984c9525da6786e0e5aabefe30ad1dbbef66e85ce9d6dbe456fd85e4135de5cf16d9455976d7ca8de7b1b530661c74c0fae90c0fff1a2b5fcdfab19fcff75fadcec445ed8af6ab5babf1463e08458918be8045083de6db988c37e4be582cfac5cdf741d1f0322fb2902665c7ff347813348109e5d442e91fcb010c28f042da481e807084fcb4759b40ccf2cae77bad00cdfbfba4acf36aa1f74c30a315e3d7f1ca522b6306e8903352aafa51dc523d582d418934398d5eb88120e3656bfb640a239db507b285302a86855ea850ddc9af72fc62dc79336c9bc29ee8314c65adb0574e9c701d73d7fa977edd1d52a1ff2da5b8b94e1a0fdd01ffcc6583758f0a1f51750e45f12b58c6d38b140e5676cf3474224520ef7c52ca5e634f85456651f3d6f43d016ed7cc5da54ea640a3bc50c2b9d3dea8f93c0340d66ccd06efc5ae002108c33cf3a470c4a50f6a6ca2f11b8ad15511688c282b94ba6f1c332e239d10946dc46f763f08d12cb9edc1e79c0e07f7151f548e6d7d20ec13b52d911bf980cac60694e192651403c9a69abea045190e847be093fc9ba43fec55b32f77f5796ddca25b441f259d5c51e06df6c6588c6414899481ba9e06bcebec58f82ff3021b09c6beae13a5d22bc94870f72ab813d0c0be01d91f3d075192e7a5de765599d72244757d09539529a8347e077a36678166e5ed9f73a5aad2e147d8154095c397e3e5e4ba1987ca64c1301a0c6c3e438097ede9b701a105ec38fcb54abb31b367c7740cd9ac459e561094a34f01acee555e60267157e6", "test"}, 
{"$odf$*0*0*1024*16*43d3dbd907785c4fa5282a2e73a5914db3372505*8*b3d676d4519e6b5a*16*34e3f7fdfa67fb0078360b0df4011270*0*7eff7a7abf1e6b0c4a9fafe6bdcfcfeaa5b1886592a52bd255f1b51096973d6fa50d792c695f3ef82c6232ae7f89c771e27db658258ad029e82415962b270d2c859b0a3efb231a0519ec1c807082638a9fad7537dec22e20d59f2bfadfa84dd941d59dd07678f9e60ffcc1eb27d8a2ae47b616618e5e80e27309cd027724355bf78b03d5432499c1d2a91d9c67155b7f49e61bd8405e75420d0cfb9e64b238623a9d8ceb47a3fdb5e7495439bb96e79882b850a0c8d3c0fbef5e6d425ae359172b9a82ec0566c3578a9f07b86a70d75b5ad339569c1c8f588143948d63bdf88d6ed2e751ac07f25ecc5778dc06247e5a9edca869ee3335e5dae351666a618d00ec05a35bc73d330bef12a46fb53b2ff96e1b2919af4e692730b9c9664aca761df10d6cf55396c4d4c268e6e96c96515c527c8fe2716ac7a9f016941aa46e6b03e8a5069c29ec8e8614b7da3e2e154a77510393051a0b693ae40da6afb5712a4ce4ac0ebacda1f45bdccc8a7b21e153d1471665cae3205fbfa00129bf00c06777bfecba2c43a1481a00111b4f0bd30c2378bd1e2e219700406411c6f897a3dfa51b31613cb241d56b68f3c241428783b353be26fa8b2df68ca215d1cf892c10fdef94faf2381a13f8cb2bce1a7dbb7522ef0b2a83e5a96ca66417fd2928784054e80d74515c1582ad356dd865837b5ea90674a30286a72a715f621c9226f19a321b413543fbbdb7cd9d1f99668b19951304e7267554d87992fbf9a96116601d0cee9e23cb22ba474c3f721434400cacf15bae05bbe9fa17f69967d03689c48a26fa57ff9676c96767762f2661b6c8f8afa4f96f989086aa02b6f8d039c6f4d158cc33a56cbf77640fb5087b2d5a5251692bb9255d0ae8148c7157c40031fdb0ea90d5fab546a7e1e1c15bd6a27f3716776c8a3fdbdd4f34c19fef22c36117c124876606b1395bf96266d647aaf5208eefd729a42a4efe42367475315a979fb74dcb9cd30917a811ed8283f2b111bb5a5d2b0f5589b3652f17d23e352e1494f231027bb93209e3c6a0388f8b2214577dca8aa9d705758aa334d6947491488770ed8066f692f8922ff0d852c2d0f965ab3d8a13c6de0ef3cff5a15ee7b64f9b1003817f0cb919ad021d5f3b0b5c1ad58db22e8fbd63abfb40e61065bad008cdffbbe3c563780a548f4515df5c935d9aa2a3033bc8a4011c9c173a0366c9b7b07f2a27de0e55373fb4b0c7726997be6f410a2ee5980393ea005516e89538be796131e450403420d72cdbd75475fd11c50efce5eb340d55d2dd0a67ca45ddb53aa582a2ec56b46452e26a505bf730998513837c96a121e4ad13af5030392ff7fb660955e03f65894733862f2367d529f0e8cdb73272b9ce01491747cb3e1a22f5c85ab6d40ddd35d15b9d46d73600e0971da90f93cb0e9be357c4f1227fbf5b123e5b", "jumper9"}, 
{"$odf$*0*0*1024*16*4ec0370ab589f943131240e407a35b58a341e052*8*19cadc01889f78c0*16*dcfcb8baccda277764e4e99833ab9640*0*a7bd859d68298fbdc36b6b51eb06f7055befe08f76ca9833c6e298db8ed971bfd1315065a19e1b31b8a93624757a2583816f35d6f251ff7943be626b3dc72f0b320c9ce5d80b7cc676aa02e6a4996abd752da573ecc339d2c80a2c8bfc28a9f4ceea51c2969adf20c8762b2ee0b1835bbd31bd90d5a638cfe523a596ea95feca64ae20010ad9957a724143e25a875f3cec3cedb4df1c16ac82b46b35db269da98270c813acd5e55a2c138306decdf96b1c1079d9cfd3704d519fbc5a4a547ba5286a7e80dc434f1bf34260433cbb79c4bcbb2a5bfc5a6c2430944ef2e34e7b9c76b21a97003c1fa85f6e9c4ed984108a7d301afe4a8f6625502a4bf17b24e009717c711571da2d6acd25868892bb9e29a77da8018222cd57c91d9aad96c954355e50a4760f08aa1f1b4257f7eb1a235c9234e8fc4ed97e8ad3e5d7d128807b726a4eb0038246d8580397c0ff5873d34b5a688a4a931be7c5737e5ada3e830b02d3efb075e338d71be55751a765a21d560933812856986a4d0d0a6d4954c50631fa3dff8565057149c4c4951858be4d5dca8e492093cfd88b56a19a161e7595e2e98764e91eb51c5289dc4efa65c7b207c517e269e3c699373fe1bf177c5d641cf2cfa4bd2afe8bff53a98b2d64bedc5a2e2f2973416c66791cf012696a0e95f7a4dadb86f925fc1943cb2b75fb3eda30f7779edff7cce95ae6f0f7b45ac207a4de4ec012a3654103136e11eb496276647d5e8f6e1659951fc7ef78d60e9430027e826f2aaab7c93ef58a5af47b92cec2f17903a26e2cc5d8d09b1db55e568bfb23a6b6b46125daf71a2f3a708676101d1b657cd38e81deb74d5d877b3321349cd667c29359b45b82218ad96f6c805ac3439fc63f0c91d66da36bae3f176c23b45b8ca1945fb4a4cea5c4a7b0f6ffd547614e7016f94d3e7889ccac868578ea779cd7e6b015aafd296dd5e2da2aa7e2f2af2ce6605f53613f069194dff35ffb9a2ebb30e011c26f669ededa2c91ffb06fedc44cf23f35d7d2716abcd50a8f561721d613d8f2c689ac245a5ac084fa86c72bbe80da7d508e63d891db528fa9e8f0d608034cd97dfde70f739857672e2d70070e850c3a6521067c1774244b86cca835ca8ff1748516e694ea2b5b42555f0df9cb9ec78825c351df51a76b6fe23b58ab3e87ba94ffbb98c9fa9d50c0c282ed0e506bcad24c02d8b625b4bdac822a9e5c911d095c5e4d3bf03448add978e0e7fab7f8a7008568f01a4f06f155223086bdcfe6879e76f199afb9caeadebaa9ec4ec8120f4ccfc4f5f7d7e3cc4dd0cba4d11546d8540030769c4b6d54abdd51fa1f30da642e5ff5c35d3e711c8931ff79e9f256ac6416e99943b0000bf32a5efdd5cf1cd668a62381febe959ca472be9c1a9bade59dbba07eb035ddb1e64ae2923bd276deed788db7600d776f49339215", "RickRoll"}, 
{"$odf$*0*0*1024*16*399a33262bbef99543bae29a6bb069c36e3a8f1b*8*6b721193b04fa933*16*99a6342ca7221c81890035dc5033c16f*0*ef8692296b67a8a77344e87b6193dc0a370b115d9e8c85e901c1a19d03ee2a34b7bf989bf9c2edab61022ea49f2a3ce5a6c807af374afd21b52ccbd0aa13784c73d2c8feda1fe0c8ebbb94e46e32904d95d1f135759e2733c2bd30b8cb0050c1cb8a2336c1151c498b9609547e96243aed9473e0901b55137ed78e2c6057e5826cfbfb94b0d77cb12b1fb6ac2752ea71c9c05cdb6a2f3d9611cb24f6e23065b408601518e3182ba1b8cef4cfcdf6ceecb2f33267cf733d3da715562e6977015b2b6423fb416781a1b6a67252eec46cda2741163f86273a68cd241a06263fdd8fc25f1c30fd4655724cc3e5c3d8f3e84abf446dd545155e440991c5fa613b7c18bd0dabd1ad45beb508cfb2b08d4337179cba63df5095b3d640eadbd72ca07f5c908241caf384ca268355c0d13471c241ea5569a5d04a9e3505883eb1c359099c1578e4bc33a73ba74ceb4a0520e0712e3c88582549a668a9c11b8680368cfbc3c5ec02663ddd97963d9dacefed89912ffa9cd945a8634a653296163bb873f3afd1d02449494fab168e7f652230c16d35853df1164219c04c4bd17954b85eb1939d87412eeeb2a039a8bb087178c03a9a40165a28a985e8bc443071b3764d846d342ca2073223f9809fe2ee3a1dfa65b9d897877ebb33a48a760c8fb32062b51a96421256a94896e93b41f559fdec7743680a8deacff9132d6129574d1a62be94308b195d06a275947a1455600030468dde53639fd239a8ab074ec1c7f661f2c9e8d60d6e0e743d351017d5c3d3be21b67d05310d0c5f3fd670acd95ca24f91b0d84d761d15259848f736ff08610e300c31b242f6d24ac2418cdd1fe0248f8a2a2f5775c08e5571c8d25d65ff573cc403ea9cad3bafd56c166fbcec9e64909df3c6ec8095088a8992493b7180c4dbb4053dcb55d9c5f46d728a97ae4ec7ac4b5941bcc3b64a4af31f7dc673e6715a52c9cdbe23dc21e51784f8314c019fc90e8612fcffe01d026fd9e15d1474e73dedf1d3830da81320097be6953173e4293372b5e5a8ecc49ac8b1a658cff16ffa04a8c1728d02ab67694170f10bc9030939ff6df3f901faa019d9b9fd2ba23e89eb0bbaf7a69a2272ee1df0403e6435aee147da217e8bf4c1ee5c53eb83aac1b3f8772d5cd2a2686f312ac4f4f2b0733593e28305a550dbbd18d3405a464ff20e0d9364cfe49b82a97ef7303aec92004a3476cf9ad012eaaf10fd07d3823e1b6871e82113ecfe4392854de9ab21ab1e33ce93d1abb07018007f50d641c8eb85b28fd335fd2281745772c98f8f0bba3f4d40ba602545ef8a0db3062f02d7ee5f49b42cbe19c0c2124952f98c49aff6927110314e54fe8d47a10f13d2d4055c1f3f2d679d4043c9b2f68b2220b6c6c738f6402c01d000c9394c8ed27e70c7ee6108d3e7e809777bab9be30b33a3fb83271cbf3b", "WhoCanItBeNow"}, {NULL} }; static cl_int cl_error; static odf_password *inbuffer; static odf_hash *outbuffer; static odf_salt currentsalt; static cl_mem mem_in, mem_out, mem_setting; size_t insize, outsize, settingsize, cracked_size; #define MIN(a, b) (((a) > (b)) ? (b) : (a)) #define MAX(a, b) (((a) > (b)) ? (a) : (b)) #define OCL_CONFIG "odf" #define STEP 0 #define SEED 256 // This file contains auto-tuning routine(s). Has to be included after formats definitions. #include "opencl-autotune.h" #include "memdbg.h" static const char * warn[] = { "xfer: ", ", crypt: ", ", xfer: " }; /* ------- Helper functions ------- */ static size_t get_task_max_work_group_size() { return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel); } static size_t get_task_max_size() { return 0; } static size_t get_default_workgroup() { if (cpu(device_info[gpu_id])) return get_platform_vendor_id(platform_id) == DEV_INTEL ? 
		8 : 1;
	else
		return 64;
}

static void create_clobj(size_t gws, struct fmt_main *self)
{
	insize = sizeof(odf_password) * gws;
	outsize = sizeof(odf_hash) * gws;
	settingsize = sizeof(odf_salt);
	cracked_size = sizeof(*crypt_out) * gws;

	inbuffer = mem_calloc(insize);
	outbuffer = mem_alloc(outsize);
	saved_key = mem_calloc(sizeof(*saved_key) * gws);
	crypt_out = mem_calloc(cracked_size);

	/// Allocate memory
	mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem in");
	mem_setting = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize, NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem setting");
	mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem out");

	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting), &mem_setting), "Error while setting mem_salt kernel argument");
}

static void release_clobj(void)
{
	HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
	HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting");
	HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");

	MEM_FREE(inbuffer);
	MEM_FREE(outbuffer);
	MEM_FREE(saved_key);
	MEM_FREE(crypt_out);
}

static void done(void)
{
	release_clobj();
	HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
	HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
}

static void init(struct fmt_main *self)
{
	char build_opts[64];

	snprintf(build_opts, sizeof(build_opts),
	         "-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d",
	         (int)sizeof(inbuffer->v),
	         (int)sizeof(currentsalt.salt),
	         (int)sizeof(outbuffer->v));
	opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl",
	            gpu_id, build_opts);

	crypt_kernel = clCreateKernel(program[gpu_id], "derive_key", &cl_error);
	HANDLE_CLERROR(cl_error, "Error creating kernel");

	// Initialize openCL tuning (library) for this format.
	opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self,
	                       create_clobj, release_clobj,
	                       sizeof(odf_password), 0);

	// Auto tune execution from shared/included code.
	autotune_run(self, 1, 0, 1000);
}

static int ishex(char *q)
{
	while (atoi16[ARCH_INDEX(*q)] != 0x7F)
		q++;
	return !*q;
}

static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy;
	char *keeptr;
	char *p;
	int res;

	if (strncmp(ciphertext, "$odf$*", 6))
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += 6;
	if ((p = strtok(ctcopy, "*")) == NULL)	/* cipher type */
		goto err;
	res = atoi(p);
	if (res != 0) {
		goto err;
	}
	if ((p = strtok(NULL, "*")) == NULL)	/* checksum type */
		goto err;
	res = atoi(p);
	if (res != 0 && res != 1)
		goto err;
	if ((p = strtok(NULL, "*")) == NULL)	/* iterations */
		goto err;
	if ((p = strtok(NULL, "*")) == NULL)	/* key size */
		goto err;
	res = atoi(p);
	if (res != 16 && res != 32)
		goto err;
	if ((p = strtok(NULL, "*")) == NULL)	/* checksum field (skipped) */
		goto err;
	if ((p = strtok(NULL, "*")) == NULL)	/* iv length */
		goto err;
	res = atoi(p);
	if (res > 16)
		goto err;
	if ((p = strtok(NULL, "*")) == NULL)	/* iv */
		goto err;
	if (strlen(p) != res * 2)
		goto err;
	if (!ishex(p))
		goto err;
	if ((p = strtok(NULL, "*")) == NULL)	/* salt length */
		goto err;
	res = atoi(p);
	if (res > 32)
		goto err;
	if ((p = strtok(NULL, "*")) == NULL)	/* salt */
		goto err;
	if (strlen(p) != res * 2)
		goto err;
	if (!ishex(p))
		goto err;
	if ((p = strtok(NULL, "*")) == NULL)	/* something */
		goto err;
	if ((p = strtok(NULL, "*")) == NULL)	/* content */
		goto err;
	res = strlen(p);
	if (res > 2048 || res & 1)
		goto err;
	if (!ishex(p))
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}

static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	int i;
	char *p;
	static odf_cpu_salt cs;

	ctcopy += 6;	/* skip over "$odf$*" */
	p = strtok(ctcopy, "*");
	cs.cipher_type = atoi(p);
	p = strtok(NULL, "*");
	cs.checksum_type = atoi(p);
	p = strtok(NULL, "*");
	cs.iterations = atoi(p);
	p = strtok(NULL, "*");
	cs.key_size = atoi(p);
	p = strtok(NULL, "*");	/* skip checksum field */
	p = strtok(NULL, "*");
	cs.iv_length = atoi(p);
	p = strtok(NULL, "*");
	for (i = 0; i < cs.iv_length; i++)
		cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtok(NULL, "*");
	cs.salt_length = atoi(p);
	p = strtok(NULL, "*");
	for (i = 0; i < cs.salt_length; i++)
		cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtok(NULL, "*");
	p = strtok(NULL, "*");
	memset(cs.content, 0, sizeof(cs.content));
	for (i = 0; p[i * 2] && i < 1024; i++)
		cs.content[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	cs.content_length = i;
	MEM_FREE(keeptr);
	return (void *)&cs;
}

static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE+1];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;

	ctcopy += 6;	/* skip over "$odf$*" */
	p = strtok(ctcopy, "*");
	p = strtok(NULL, "*");
	p = strtok(NULL, "*");
	p = strtok(NULL, "*");
	p = strtok(NULL, "*");	/* the checksum field is the binary */
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) |
			atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	MEM_FREE(keeptr);
	return out;
}

static void set_salt(void *salt)
{
	cur_salt = (odf_cpu_salt*)salt;
	memcpy((char*)currentsalt.salt, cur_salt->salt, cur_salt->salt_length);
	currentsalt.length = cur_salt->salt_length;
	currentsalt.iterations = cur_salt->iterations;
	currentsalt.outlen = cur_salt->key_size;

	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting, CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL), "Copy salt to gpu");
}

static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; }

#undef set_key
static void set_key(char *key, int index)
{
	int saved_key_length = strlen(key);

	if (saved_key_length > PLAINTEXT_LENGTH)
		saved_key_length = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_key_length);
	saved_key[index][saved_key_length] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index;

	global_work_size = (count + local_work_size - 1) / local_work_size * local_work_size;

	// The PBKDF2 input for each candidate is SHA-1 of the password.
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		unsigned char hash[20];
		SHA_CTX ctx;

		SHA1_Init(&ctx);
		SHA1_Update(&ctx, (unsigned char *)saved_key[index], strlen(saved_key[index]));
		SHA1_Final((unsigned char *)hash, &ctx);
		memcpy(inbuffer[index].v, hash, 20);
		inbuffer[index].length = 20;
	}

	/// Copy data to gpu
	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0, insize, inbuffer, 0, NULL, multi_profilingEvent[0]), "Copy data to gpu");

	/// Run kernel
	HANDLE_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &global_work_size, &local_work_size, 0, NULL, multi_profilingEvent[1]), "Run kernel");

	/// Read the result back
	HANDLE_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0, outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back");

	// Decrypt the content with the derived key and checksum the plaintext.
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		BF_KEY bf_key;
		SHA_CTX ctx;
		int bf_ivec_pos;
		unsigned char ivec[8];
		unsigned char output[1024];

		bf_ivec_pos = 0;
		memcpy(ivec, cur_salt->iv, 8);
		BF_set_key(&bf_key, cur_salt->key_size, (unsigned char*)outbuffer[index].v);
		BF_cfb64_encrypt(cur_salt->content, output, cur_salt->content_length, &bf_key, ivec, &bf_ivec_pos, 0);
		SHA1_Init(&ctx);
		SHA1_Update(&ctx, output, cur_salt->content_length);
		SHA1_Final((unsigned char*)crypt_out[index], &ctx);
	}
	return count;
}

static int cmp_all(void *binary, int count)
{
	int index = 0;

	for (; index < count; index++)
		if (!memcmp(binary, crypt_out[index], BINARY_SIZE))
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

struct fmt_main fmt_opencl_odf = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		odf_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
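/* Illustrative CPU-side restatement (not the plugin's code path): the full
 * check chain is SHA1(password) -> PBKDF2-HMAC-SHA1 (the step the GPU kernel
 * pbkdf2_hmac_sha1_unsplit_kernel.cl performs) -> Blowfish-CFB64 decrypt ->
 * SHA1(plaintext) compared against the stored checksum. The function below is
 * a hypothetical reference sketch using stock OpenSSL calls; content_len must
 * not exceed 1024, matching odf_cpu_salt.content. */
#include <string.h>
#include <openssl/sha.h>
#include <openssl/evp.h>
#include <openssl/blowfish.h>

static int odf_reference_check(const char *password,
                               const unsigned char *salt, int salt_len,
                               int iterations, int key_size,
                               const unsigned char iv[8],
                               const unsigned char *content, int content_len,
                               const unsigned char expected[20])
{
	unsigned char pwhash[20], key[32], plain[1024], chk[20], ivec[8];
	int ivpos = 0;
	BF_KEY bf;

	SHA1((const unsigned char *)password, strlen(password), pwhash);
	PKCS5_PBKDF2_HMAC_SHA1((const char *)pwhash, 20, salt, salt_len,
	                       iterations, key_size, key);
	memcpy(ivec, iv, 8);
	BF_set_key(&bf, key_size, key);
	BF_cfb64_encrypt(content, plain, content_len, &bf, ivec, &ivpos, BF_DECRYPT);
	SHA1(plain, content_len, chk);
	return !memcmp(chk, expected, 20);
}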
loop_fusion_par.c
void compute(unsigned long **a, unsigned long **b, unsigned long **c,
             unsigned long **d, int N, int num_threads)
{
    // Loop fusion applied: one parallel i-loop now carries both phases,
    // with the requested thread count taken from num_threads.
    // The two j-loops stay separate because the second reads a[i][j + 1],
    // which the first j-loop only produces on a later iteration; finishing
    // the first j-sweep per row honors that dependence.
    #pragma omp parallel for num_threads(num_threads)
    for (int i = 1; i < N; i++) {
        for (int j = 1; j < N; j++) {
            a[i][j] = 2 * b[i][j];
            d[i][j] = a[i][j] * c[i][j];
        }
        for (int j = 1; j < N; ++j)
            c[i][j - 1] = a[i][j - 1] - a[i][j + 1];
    }
}
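/* Hypothetical driver (the exercise file above ships none; the allocation
 * scheme is an assumption). Each matrix gets N rows of N + 1 columns because
 * compute() reads a[i][j + 1], i.e. a[i][N] when j = N - 1. Cleanup is
 * omitted for brevity. */
#include <stdlib.h>

static unsigned long **alloc2d(int rows, int cols)
{
    unsigned long **m = malloc(rows * sizeof *m);
    for (int i = 0; i < rows; i++)
        m[i] = calloc(cols, sizeof **m);
    return m;
}

int main(void)
{
    const int N = 512;
    unsigned long **a = alloc2d(N, N + 1), **b = alloc2d(N, N + 1);
    unsigned long **c = alloc2d(N, N + 1), **d = alloc2d(N, N + 1);

    compute(a, b, c, d, N, 4 /* threads */);
    return 0;
}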
dpado.202001141642.batch_number_limit.h
// // Created by Zhen Peng on 1/6/20. // #ifndef PADO_DPADO_H #define PADO_DPADO_H #include <vector> //#include <unordered_map> #include <map> #include <algorithm> #include <iostream> #include <limits.h> //#include <xmmintrin.h> #include <immintrin.h> #include <bitset> #include <math.h> #include <fstream> #include <omp.h> #include "globals.h" #include "dglobals.h" #include "dgraph.h" namespace PADO { template <VertexID BATCH_SIZE = 1024> class DistBVCPLL { private: static const VertexID BITPARALLEL_SIZE = 50; const inti THRESHOLD_PARALLEL = 80; // Structure for the type of label struct IndexType { // struct Batch { // VertexID batch_id; // Batch ID // VertexID start_index; // Index to the array distances where the batch starts // VertexID size; // Number of distances element in this batch // // Batch() = default; // Batch(VertexID batch_id_, VertexID start_index_, VertexID size_): // batch_id(batch_id_), start_index(start_index_), size(size_) // { } // }; struct DistanceIndexType { VertexID start_index; // Index to the array vertices where the same-distance vertices start VertexID size; // Number of the same-distance vertices UnweightedDist dist; // The real distance DistanceIndexType() = default; DistanceIndexType(VertexID start_index_, VertexID size_, UnweightedDist dist_): start_index(start_index_), size(size_), dist(dist_) { } }; // Bit-parallel Labels UnweightedDist bp_dist[BITPARALLEL_SIZE]; uint64_t bp_sets[BITPARALLEL_SIZE][2]; // [0]: S^{-1}, [1]: S^{0} // std::vector<Batch> batches; // Batch info std::vector<DistanceIndexType> distances; // Distance info std::vector<VertexID> vertices; // Vertices in the label, presented as temporary ID size_t size() const { return sizeof(bp_dist) + sizeof(bp_sets) + // batches.size() * sizeof(Batch) + distances.size() * sizeof(DistanceIndexType) + vertices.size() * sizeof(VertexID); } }; //__attribute__((aligned(64))); struct ShortIndex { // I use BATCH_SIZE + 1 bit for indicator bit array. // The v.indicator[BATCH_SIZE] is set if in current batch v has got any new labels already. // In this way, it helps update_label_indices() and can be reset along with other indicator elements. // std::bitset<BATCH_SIZE + 1> indicator; // Global indicator, indicator[r] (0 <= r < BATCH_SIZE) is set means root r once selected as candidate already // If the Batch structure is not used, the indicator could just be BATCH_SIZE long. 
std::vector<uint8_t> indicator = std::vector<uint8_t>(BATCH_SIZE, 0); // std::vector<uint8_t> indicator = std::vector<uint8_t>(BATCH_SIZE + 1, 0); // Use a queue to store candidates std::vector<VertexID> candidates_que = std::vector<VertexID>(BATCH_SIZE); VertexID end_candidates_que = 0; std::vector<uint8_t> is_candidate = std::vector<uint8_t>(BATCH_SIZE, 0); void indicator_reset() { std::fill(indicator.begin(), indicator.end(), 0); } }; //__attribute__((aligned(64))); // Type of Bit-Parallel Label struct BPLabelType { UnweightedDist bp_dist[BITPARALLEL_SIZE] = { 0 }; uint64_t bp_sets[BITPARALLEL_SIZE][2] = { {0} }; // [0]: S^{-1}, [1]: S^{0} }; // Type of Label Message Unit, for initializing distance table struct LabelTableUnit { VertexID root_id; VertexID label_global_id; UnweightedDist dist; LabelTableUnit() = default; LabelTableUnit(VertexID r, VertexID l, UnweightedDist d) : root_id(r), label_global_id(l), dist(d) {} }; // Type of BitParallel Label Message Unit for initializing bit-parallel labels struct MsgBPLabel { VertexID r_root_id; UnweightedDist bp_dist[BITPARALLEL_SIZE]; uint64_t bp_sets[BITPARALLEL_SIZE][2]; MsgBPLabel() = default; MsgBPLabel(VertexID r, const UnweightedDist dist[], const uint64_t sets[][2]) : r_root_id(r) { memcpy(bp_dist, dist, sizeof(bp_dist)); memcpy(bp_sets, sets, sizeof(bp_sets)); } }; VertexID num_v = 0; VertexID num_masters = 0; // VertexID BATCH_SIZE = 0; int host_id = 0; int num_hosts = 0; MPI_Datatype V_ID_Type; std::vector<IndexType> L; inline void bit_parallel_push_labels( const DistGraph &G, VertexID v_global, // std::vector<VertexID> &tmp_que, // VertexID &end_tmp_que, // std::vector< std::pair<VertexID, VertexID> > &sibling_es, // VertexID &num_sibling_es, // std::vector< std::pair<VertexID, VertexID> > &child_es, // VertexID &num_child_es, std::vector<VertexID> &tmp_q, VertexID &size_tmp_q, std::vector< std::pair<VertexID, VertexID> > &tmp_sibling_es, VertexID &size_tmp_sibling_es, std::vector< std::pair<VertexID, VertexID> > &tmp_child_es, VertexID &size_tmp_child_es, const VertexID &offset_tmp_q, std::vector<UnweightedDist> &dists, UnweightedDist iter); inline void bit_parallel_labeling( const DistGraph &G, std::vector<uint8_t> &used_bp_roots); // inline void bit_parallel_push_labels( // const DistGraph &G, // VertexID v_global, // std::vector<VertexID> &tmp_que, // VertexID &end_tmp_que, // std::vector< std::pair<VertexID, VertexID> > &sibling_es, // VertexID &num_sibling_es, // std::vector< std::pair<VertexID, VertexID> > &child_es, // VertexID &num_child_es, // std::vector<UnweightedDist> &dists, // UnweightedDist iter); // inline void bit_parallel_labeling( // const DistGraph &G, //// std::vector<IndexType> &L, // std::vector<uint8_t> &used_bp_roots); inline void batch_process( const DistGraph &G, // const VertexID b_id, const VertexID roots_start, const VertexID roots_size, const std::vector<uint8_t> &used_bp_roots, std::vector<VertexID> &active_queue, VertexID &end_active_queue, std::vector<VertexID> &got_candidates_queue, VertexID &end_got_candidates_queue, std::vector<ShortIndex> &short_index, std::vector< std::vector<UnweightedDist> > &dist_table, std::vector< std::vector<VertexID> > &recved_dist_table, std::vector<BPLabelType> &bp_labels_table, std::vector<uint8_t> &got_candidates, // std::vector<bool> &got_candidates, std::vector<uint8_t> &is_active, // std::vector<bool> &is_active, std::vector<VertexID> &once_candidated_queue, VertexID &end_once_candidated_queue, std::vector<uint8_t> &once_candidated); // std::vector<bool> 
&once_candidated); inline VertexID initialization( const DistGraph &G, std::vector<ShortIndex> &short_index, std::vector< std::vector<UnweightedDist> > &dist_table, std::vector< std::vector<VertexID> > &recved_dist_table, std::vector<BPLabelType> &bp_labels_table, std::vector<VertexID> &active_queue, VertexID &end_active_queue, std::vector<VertexID> &once_candidated_queue, VertexID &end_once_candidated_queue, std::vector<uint8_t> &once_candidated, // std::vector<bool> &once_candidated, // VertexID b_id, VertexID roots_start, VertexID roots_size, // std::vector<VertexID> &roots_master_local, const std::vector<uint8_t> &used_bp_roots); // inline void push_single_label( // VertexID v_head_global, // VertexID label_root_id, // VertexID roots_start, // const DistGraph &G, // std::vector<ShortIndex> &short_index, // std::vector<VertexID> &got_candidates_queue, // VertexID &end_got_candidates_queue, // std::vector<bool> &got_candidates, // std::vector<VertexID> &once_candidated_queue, // VertexID &end_once_candidated_queue, // std::vector<bool> &once_candidated, // const std::vector<BPLabelType> &bp_labels_table, // const std::vector<uint8_t> &used_bp_roots, // UnweightedDist iter); inline void schedule_label_pushing_para( const DistGraph &G, const VertexID roots_start, const std::vector<uint8_t> &used_bp_roots, const std::vector<VertexID> &active_queue, const VertexID global_start, const VertexID global_size, const VertexID local_size, // const VertexID start_active_queue, // const VertexID size_active_queue, std::vector<VertexID> &got_candidates_queue, VertexID &end_got_candidates_queue, std::vector<ShortIndex> &short_index, const std::vector<BPLabelType> &bp_labels_table, std::vector<uint8_t> &got_candidates, std::vector<uint8_t> &is_active, std::vector<VertexID> &once_candidated_queue, VertexID &end_once_candidated_queue, std::vector<uint8_t> &once_candidated, const UnweightedDist iter); inline void local_push_labels_seq( VertexID v_head_global, EdgeID start_index, EdgeID bound_index, VertexID roots_start, const std::vector<VertexID> &labels_buffer, const DistGraph &G, std::vector<ShortIndex> &short_index, std::vector<VertexID> &got_candidates_queue, VertexID &end_got_candidates_queue, std::vector<uint8_t> &got_candidates, // std::vector<bool> &got_candidates, std::vector<VertexID> &once_candidated_queue, VertexID &end_once_candidated_queue, std::vector<uint8_t> &once_candidated, // std::vector<bool> &once_candidated, const std::vector<BPLabelType> &bp_labels_table, const std::vector<uint8_t> &used_bp_roots, const UnweightedDist iter); inline void local_push_labels_para( const VertexID v_head_global, const EdgeID start_index, const EdgeID bound_index, const VertexID roots_start, const std::vector<VertexID> &labels_buffer, const DistGraph &G, std::vector<ShortIndex> &short_index, // std::vector<VertexID> &got_candidates_queue, // VertexID &end_got_candidates_queue, std::vector<VertexID> &tmp_got_candidates_queue, VertexID &size_tmp_got_candidates_queue, const VertexID offset_tmp_queue, std::vector<uint8_t> &got_candidates, // std::vector<VertexID> &once_candidated_queue, // VertexID &end_once_candidated_queue, std::vector<VertexID> &tmp_once_candidated_queue, VertexID &size_tmp_once_candidated_queue, std::vector<uint8_t> &once_candidated, const std::vector<BPLabelType> &bp_labels_table, const std::vector<uint8_t> &used_bp_roots, const UnweightedDist iter); // inline void local_push_labels( // VertexID v_head_local, // VertexID roots_start, // const DistGraph &G, // std::vector<ShortIndex> 
&short_index, // std::vector<VertexID> &got_candidates_queue, // VertexID &end_got_candidates_queue, // std::vector<bool> &got_candidates, // std::vector<VertexID> &once_candidated_queue, // VertexID &end_once_candidated_queue, // std::vector<bool> &once_candidated, // const std::vector<BPLabelType> &bp_labels_table, // const std::vector<uint8_t> &used_bp_roots, // UnweightedDist iter); inline bool distance_query( VertexID cand_root_id, VertexID v_id, VertexID roots_start, // const std::vector<IndexType> &L, const std::vector< std::vector<UnweightedDist> > &dist_table, UnweightedDist iter); inline void insert_label_only_seq( VertexID cand_root_id, // VertexID cand_root_id, VertexID v_id_local, VertexID roots_start, VertexID roots_size, const DistGraph &G, // std::vector< std::vector<UnweightedDist> > &dist_table, std::vector< std::pair<VertexID, VertexID> > &buffer_send); // UnweightedDist iter); inline void insert_label_only_para( VertexID cand_root_id, VertexID v_id_local, VertexID roots_start, VertexID roots_size, const DistGraph &G, // std::vector< std::pair<VertexID, VertexID> > &buffer_send) std::vector< std::pair<VertexID, VertexID> > &tmp_buffer_send, EdgeID &size_tmp_buffer_send, const EdgeID offset_tmp_buffer_send); inline void update_label_indices( const VertexID v_id, const VertexID inserted_count, // std::vector<IndexType> &L, // std::vector<ShortIndex> &short_index, // VertexID b_id, const UnweightedDist iter); inline void reset_at_end( // const DistGraph &G, // VertexID roots_start, // const std::vector<VertexID> &roots_master_local, std::vector< std::vector<UnweightedDist> > &dist_table, std::vector< std::vector<VertexID> > &recved_dist_table, std::vector<BPLabelType> &bp_labels_table); // template <typename E_T, typename F> // inline void every_host_bcasts_buffer_and_proc( // std::vector<E_T> &buffer_send, // F &fun); template <typename E_T> inline void one_host_bcasts_buffer_to_buffer( int root, std::vector<E_T> &buffer_send, std::vector<E_T> &buffer_recv); // // Function: get the destination host id which is i hop from this host. // // For example, 1 hop from host 2 is host 0 (assume total 3 hosts); // // -1 hop from host 0 is host 2. // int hop_2_me_host_id(int hop) const // { // assert(hop >= -(num_hosts - 1) && hop < num_hosts && hop != 0); // return (host_id + hop + num_hosts) % num_hosts; // } // // Function: get the destination host id which is i hop from the root. // // For example, 1 hop from host 2 is host 0 (assume total 3 hosts); // // -1 hop from host 0 is host 2. 
// int hop_2_root_host_id(int hop, int root) const // { // assert(hop >= -(num_hosts - 1) && hop < num_hosts && hop != 0); // assert(root >= 0 && root < num_hosts); // return (root + hop + num_hosts) % num_hosts; // } size_t get_index_size() { size_t bytes = 0; for (VertexID v_i = 0; v_i < num_masters; ++v_i) { bytes += L[v_i].size(); } return bytes; } // Test only // uint64_t normal_hit_count = 0; // uint64_t bp_hit_count = 0; // uint64_t total_check_count = 0; // uint64_t normal_check_count = 0; // uint64_t total_candidates_num = 0; // uint64_t set_candidates_num = 0; // double initializing_time = 0; // double candidating_time = 0; // double adding_time = 0; // double distance_query_time = 0; // double init_index_time = 0; // double init_dist_matrix_time = 0; // double init_start_reset_time = 0; // double init_indicators_time = 0; //L2CacheMissRate cache_miss; // double message_time = 0; // double bp_labeling_time = 0; // double initializing_time = 0; // double scatter_time = 0; // double gather_time = 0; // double clearup_time = 0; // TotalInstructsExe candidating_ins_count; // TotalInstructsExe adding_ins_count; // TotalInstructsExe bp_labeling_ins_count; // TotalInstructsExe bp_checking_ins_count; // TotalInstructsExe dist_query_ins_count; // End test public: // std::pair<uint64_t, uint64_t> length_larger_than_16 = std::make_pair(0, 0); DistBVCPLL() = default; explicit DistBVCPLL( const DistGraph &G); // UnweightedDist dist_distance_query_pair( // VertexID a_global, // VertexID b_global, // const DistGraph &G); }; // class DistBVCPLL template <VertexID BATCH_SIZE> DistBVCPLL<BATCH_SIZE>:: DistBVCPLL( const DistGraph &G) { num_v = G.num_v; assert(num_v >= BATCH_SIZE); num_masters = G.num_masters; host_id = G.host_id; // { // if (1 == host_id) { // volatile int i = 0; // while (i == 0) { // sleep(5); // } // } // } num_hosts = G.num_hosts; V_ID_Type = G.V_ID_Type; // L.resize(num_v); L.resize(num_masters); VertexID remainer = num_v % BATCH_SIZE; VertexID b_i_bound = num_v / BATCH_SIZE; std::vector<uint8_t> used_bp_roots(num_v, 0); //cache_miss.measure_start(); double time_labeling = -WallTimer::get_time_mark(); // bp_labeling_time -= WallTimer::get_time_mark(); bit_parallel_labeling(G, used_bp_roots); // bp_labeling_time += WallTimer::get_time_mark(); // {//test ////#ifdef DEBUG_MESSAGES_ON // if (0 == host_id) { // printf("host_id: %u bp_labeling_finished.\n", host_id); // } ////#endif // } std::vector<VertexID> active_queue(num_masters); // Any vertex v who is active should be put into this queue. VertexID end_active_queue = 0; std::vector<uint8_t> is_active(num_masters, false);// is_active[v] is true means vertex v is in the active queue. // std::vector<bool> is_active(num_masters, false);// is_active[v] is true means vertex v is in the active queue. std::vector<VertexID> got_candidates_queue(num_masters); // Any vertex v who got candidates should be put into this queue. VertexID end_got_candidates_queue = 0; std::vector<uint8_t> got_candidates(num_masters, false); // got_candidates[v] is true means vertex v is in the queue got_candidates_queue // std::vector<bool> got_candidates(num_masters, false); // got_candidates[v] is true means vertex v is in the queue got_candidates_queue std::vector<ShortIndex> short_index(num_masters); std::vector< std::vector<UnweightedDist> > dist_table(BATCH_SIZE, std::vector<UnweightedDist>(num_v, MAX_UNWEIGHTED_DIST)); std::vector<VertexID> once_candidated_queue(num_masters); // if short_index[v].indicator.any() is true, v is in the queue. 
// Used mainly for resetting short_index[v].indicator. VertexID end_once_candidated_queue = 0; std::vector<uint8_t> once_candidated(num_masters, false); // std::vector<bool> once_candidated(num_masters, false); std::vector< std::vector<VertexID> > recved_dist_table(BATCH_SIZE); // Some distances are from other hosts. This is used to reset the dist_table. std::vector<BPLabelType> bp_labels_table(BATCH_SIZE); // All roots' bit-parallel labels //printf("b_i_bound: %u\n", b_i_bound);//test for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) { {// Batch number limit if (10 == b_i) { remainer = 0; break; } } // { ////#ifdef DEBUG_MESSAGES_ON // if (0 == host_id) { // printf("b_i: %u\n", b_i);//test // } ////#endif // } batch_process( G, // b_i, b_i * BATCH_SIZE, BATCH_SIZE, // L, used_bp_roots, active_queue, end_active_queue, got_candidates_queue, end_got_candidates_queue, short_index, dist_table, recved_dist_table, bp_labels_table, got_candidates, is_active, once_candidated_queue, end_once_candidated_queue, once_candidated); // exit(EXIT_SUCCESS); //test } if (remainer != 0) { // { ////#ifdef DEBUG_MESSAGES_ON // if (0 == host_id) { // printf("b_i: %u\n", b_i_bound);//test // } ////#endif // } batch_process( G, // b_i_bound, b_i_bound * BATCH_SIZE, remainer, // L, used_bp_roots, active_queue, end_active_queue, got_candidates_queue, end_got_candidates_queue, short_index, dist_table, recved_dist_table, bp_labels_table, got_candidates, is_active, once_candidated_queue, end_once_candidated_queue, once_candidated); } time_labeling += WallTimer::get_time_mark(); //cache_miss.measure_stop(); // Test setlocale(LC_NUMERIC, ""); if (0 == host_id) { printf("BATCH_SIZE: %u\n", BATCH_SIZE); printf("BP_Size: %u\n", BITPARALLEL_SIZE); } {// Total Number of Labels EdgeID local_num_labels = 0; for (VertexID v_global = 0; v_global < num_v; ++v_global) { if (G.get_master_host_id(v_global) != host_id) { continue; } local_num_labels += L[G.get_local_vertex_id(v_global)].vertices.size(); } EdgeID global_num_labels; MPI_Allreduce(&local_num_labels, &global_num_labels, 1, MPI_Instance::get_mpi_datatype<EdgeID>(), MPI_SUM, MPI_COMM_WORLD); // printf("host_id: %u local_num_labels: %lu %.2f%%\n", host_id, local_num_labels, 100.0 * local_num_labels / global_num_labels); MPI_Barrier(MPI_COMM_WORLD); if (0 == host_id) { printf("Global_num_labels: %lu average: %f\n", global_num_labels, 1.0 * global_num_labels / num_v); } // VertexID local_num_batches = 0; // VertexID local_num_distances = 0; //// double local_avg_distances_per_batches = 0; // for (VertexID v_global = 0; v_global < num_v; ++v_global) { // if (G.get_master_host_id(v_global) != host_id) { // continue; // } // VertexID v_local = G.get_local_vertex_id(v_global); // local_num_batches += L[v_local].batches.size(); // local_num_distances += L[v_local].distances.size(); //// double avg_d_p_b = 0; //// for (VertexID i_b = 0; i_b < L[v_local].batches.size(); ++i_b) { //// avg_d_p_b += L[v_local].batches[i_b].size; //// } //// avg_d_p_b /= L[v_local].batches.size(); //// local_avg_distances_per_batches += avg_d_p_b; // } //// local_avg_distances_per_batches /= num_masters; //// double local_avg_batches = local_num_batches * 1.0 / num_masters; //// double local_avg_distances = local_num_distances * 1.0 / num_masters; // uint64_t global_num_batches = 0; // uint64_t global_num_distances = 0; // MPI_Allreduce( // &local_num_batches, // &global_num_batches, // 1, // MPI_UINT64_T, // MPI_SUM, // MPI_COMM_WORLD); //// global_avg_batches /= num_hosts; // MPI_Allreduce( // 
&local_num_distances, // &global_num_distances, // 1, // MPI_UINT64_T, // MPI_SUM, // MPI_COMM_WORLD); //// global_avg_distances /= num_hosts; // double global_avg_d_p_b = global_num_distances * 1.0 / global_num_batches; // double global_avg_l_p_d = global_num_labels * 1.0 / global_num_distances; // double global_avg_batches = global_num_batches / num_v; // double global_avg_distances = global_num_distances / num_v; //// MPI_Allreduce( //// &local_avg_distances_per_batches, //// &global_avg_d_p_b, //// 1, //// MPI_DOUBLE, //// MPI_SUM, //// MPI_COMM_WORLD); //// global_avg_d_p_b /= num_hosts; // MPI_Barrier(MPI_COMM_WORLD); // if (0 == host_id) { // printf("global_avg_batches: %f " // "global_avg_distances: %f " // "global_avg_distances_per_batch: %f " // "global_avg_labels_per_distance: %f\n", // global_avg_batches, // global_avg_distances, // global_avg_d_p_b, // global_avg_l_p_d); // } } // printf("BP_labeling: %f %.2f%%\n", bp_labeling_time, bp_labeling_time / time_labeling * 100); // printf("Initializing: %f %.2f%%\n", initializing_time, initializing_time / time_labeling * 100); // printf("\tinit_start_reset_time: %f (%f%%)\n", init_start_reset_time, init_start_reset_time / initializing_time * 100); // printf("\tinit_index_time: %f (%f%%)\n", init_index_time, init_index_time / initializing_time * 100); // printf("\t\tinit_indicators_time: %f (%f%%)\n", init_indicators_time, init_indicators_time / init_index_time * 100); // printf("\tinit_dist_matrix_time: %f (%f%%)\n", init_dist_matrix_time, init_dist_matrix_time / initializing_time * 100); // printf("Candidating: %f %.2f%%\n", candidating_time, candidating_time / time_labeling * 100); // printf("Adding: %f %.2f%%\n", adding_time, adding_time / time_labeling * 100); // printf("distance_query_time: %f %.2f%%\n", distance_query_time, distance_query_time / time_labeling * 100); // uint64_t total_check_count = bp_hit_count + normal_check_count; // printf("total_check_count: %'llu\n", total_check_count); // printf("bp_hit_count: %'llu %.2f%%\n", // bp_hit_count, // bp_hit_count * 100.0 / total_check_count); // printf("normal_check_count: %'llu %.2f%%\n", normal_check_count, normal_check_count * 100.0 / total_check_count); // printf("total_candidates_num: %'llu set_candidates_num: %'llu %.2f%%\n", // total_candidates_num, // set_candidates_num, // set_candidates_num * 100.0 / total_candidates_num); // printf("\tnormal_hit_count (to total_check, to normal_check): %llu (%f%%, %f%%)\n", // normal_hit_count, // normal_hit_count * 100.0 / total_check_count, // normal_hit_count * 100.0 / (total_check_count - bp_hit_count)); //cache_miss.print(); // printf("Candidating: "); candidating_ins_count.print(); // printf("Adding: "); adding_ins_count.print(); // printf("BP_Labeling: "); bp_labeling_ins_count.print(); // printf("BP_Checking: "); bp_checking_ins_count.print(); // printf("distance_query: "); dist_query_ins_count.print(); // printf("num_hosts: %u host_id: %u\n" // "Local_labeling_time: %.2f seconds\n" // "bp_labeling_time: %.2f %.2f%%\n" // "initializing_time: %.2f %.2f%%\n" // "scatter_time: %.2f %.2f%%\n" // "gather_time: %.2f %.2f%%\n" // "clearup_time: %.2f %.2f%%\n" // "message_time: %.2f %.2f%%\n", // num_hosts, host_id, // time_labeling, // bp_labeling_time, 100.0 * bp_labeling_time / time_labeling, // initializing_time, 100.0 * initializing_time / time_labeling, // scatter_time, 100.0 * scatter_time / time_labeling, // gather_time, 100.0 * gather_time / time_labeling, // clearup_time, 100.0 * clearup_time / time_labeling, // 
message_time, 100.0 * message_time / time_labeling); double global_time_labeling; MPI_Allreduce(&time_labeling, &global_time_labeling, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); MPI_Barrier(MPI_COMM_WORLD); if (0 == host_id) { printf("num_hosts: %d " "Global_labeling_time: %.2f seconds\n", num_hosts, global_time_labeling); } // End test } //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::bit_parallel_labeling( // const DistGraph &G, // std::vector<uint8_t> &used_bp_roots) //{ //// VertexID num_v = G.num_v; // EdgeID num_e = G.num_e; // // std::vector<UnweightedDist> tmp_d(num_v); // distances from the root to every v // std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0} // std::vector<VertexID> que(num_v); // active queue // std::vector<std::pair<VertexID, VertexID> > sibling_es(num_e); // siblings, their distances to the root are equal (have difference of 0) // std::vector<std::pair<VertexID, VertexID> > child_es(num_e); // child and father, their distances to the root have difference of 1. // // VertexID r = 0; // root r // for (VertexID i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) { // while (r < num_v && used_bp_roots[r]) { // ++r; // } // if (r == num_v) { // for (VertexID v = 0; v < num_v; ++v) { // L[v].bp_dist[i_bpspt] = MAX_UNWEIGHTED_DIST; // } // continue; // } // used_bp_roots[r] = true; // // fill(tmp_d.begin(), tmp_d.end(), MAX_UNWEIGHTED_DIST); // fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0)); // // VertexID que_t0 = 0, que_t1 = 0, que_h = 0; // que[que_h++] = r; // tmp_d[r] = 0; // que_t1 = que_h; // // int ns = 0; // number of selected neighbor, default 64 // // the edge of one vertex in G is ordered decreasingly to rank, lower rank first, so here need to traverse edges backward // // There was a bug cost countless time: the unsigned iterator i might decrease to zero and then flip to the INF. //// VertexID i_bound = G.vertices[r] - 1; //// VertexID i_start = i_bound + G.out_degrees[r]; //// for (VertexID i = i_start; i > i_bound; --i) { // //int i_bound = G.vertices[r]; // //int i_start = i_bound + G.out_degrees[r] - 1; // //for (int i = i_start; i >= i_bound; --i) { // VertexID d_i_bound = G.local_out_degrees[r]; // EdgeID i_start = G.vertices_idx[r] + d_i_bound - 1; // for (VertexID d_i = 0; d_i < d_i_bound; ++d_i) { // EdgeID i = i_start - d_i; // VertexID v = G.out_edges[i]; // if (!used_bp_roots[v]) { // used_bp_roots[v] = true; // // Algo3:line4: for every v in S_r, (dist[v], S_r^{-1}[v], S_r^{0}[v]) <- (1, {v}, empty_set) // que[que_h++] = v; // tmp_d[v] = 1; // tmp_s[v].first = 1ULL << ns; // if (++ns == 64) break; // } // } // //} //// } // // for (UnweightedDist d = 0; que_t0 < que_h; ++d) { // VertexID num_sibling_es = 0, num_child_es = 0; // // for (VertexID que_i = que_t0; que_i < que_t1; ++que_i) { // VertexID v = que[que_i]; //// bit_parallel_push_labels(G, //// v, //// que, //// que_h, //// sibling_es, //// num_sibling_es, //// child_es, //// num_child_es, //// tmp_d, //// d); // EdgeID i_start = G.vertices_idx[v]; // EdgeID i_bound = i_start + G.local_out_degrees[v]; // for (EdgeID i = i_start; i < i_bound; ++i) { // VertexID tv = G.out_edges[i]; // UnweightedDist td = d + 1; // // if (d > tmp_d[tv]) { // ; // } // else if (d == tmp_d[tv]) { // if (v < tv) { // ??? Why need v < tv !!! Because it's a undirected graph. 
// sibling_es[num_sibling_es].first = v; // sibling_es[num_sibling_es].second = tv; // ++num_sibling_es; // } // } else { // d < tmp_d[tv] // if (tmp_d[tv] == MAX_UNWEIGHTED_DIST) { // que[que_h++] = tv; // tmp_d[tv] = td; // } // child_es[num_child_es].first = v; // child_es[num_child_es].second = tv; // ++num_child_es; // } // } // } // // for (VertexID i = 0; i < num_sibling_es; ++i) { // VertexID v = sibling_es[i].first, w = sibling_es[i].second; // tmp_s[v].second |= tmp_s[w].first; // tmp_s[w].second |= tmp_s[v].first; // } // for (VertexID i = 0; i < num_child_es; ++i) { // VertexID v = child_es[i].first, c = child_es[i].second; // tmp_s[c].first |= tmp_s[v].first; // tmp_s[c].second |= tmp_s[v].second; // } // // {// test // printf("iter %u @%u host_id: %u num_sibling_es: %u num_child_es: %u\n", d, __LINE__, host_id, num_sibling_es, num_child_es); //// if (4 == d) { //// exit(EXIT_SUCCESS); //// } // } // // que_t0 = que_t1; // que_t1 = que_h; // } // // for (VertexID v = 0; v < num_v; ++v) { // L[v].bp_dist[i_bpspt] = tmp_d[v]; // L[v].bp_sets[i_bpspt][0] = tmp_s[v].first; // S_r^{-1} // L[v].bp_sets[i_bpspt][1] = tmp_s[v].second & ~tmp_s[v].first; // Only need those r's neighbors who are not already in S_r^{-1} // } // } // //} template <VertexID BATCH_SIZE> inline void DistBVCPLL<BATCH_SIZE>:: bit_parallel_push_labels( const DistGraph &G, const VertexID v_global, // std::vector<VertexID> &tmp_que, // VertexID &end_tmp_que, // std::vector< std::pair<VertexID, VertexID> > &sibling_es, // VertexID &num_sibling_es, // std::vector< std::pair<VertexID, VertexID> > &child_es, // VertexID &num_child_es, std::vector<VertexID> &tmp_q, VertexID &size_tmp_q, std::vector< std::pair<VertexID, VertexID> > &tmp_sibling_es, VertexID &size_tmp_sibling_es, std::vector< std::pair<VertexID, VertexID> > &tmp_child_es, VertexID &size_tmp_child_es, const VertexID &offset_tmp_q, std::vector<UnweightedDist> &dists, const UnweightedDist iter) { EdgeID i_start = G.vertices_idx[v_global]; EdgeID i_bound = i_start + G.local_out_degrees[v_global]; // {//test // printf("host_id: %u local_out_degrees[%u]: %u\n", host_id, v_global, G.local_out_degrees[v_global]); // } for (EdgeID i = i_start; i < i_bound; ++i) { VertexID tv_global = G.out_edges[i]; VertexID tv_local = G.get_local_vertex_id(tv_global); UnweightedDist td = iter + 1; if (iter > dists[tv_local]) { ; } else if (iter == dists[tv_local]) { if (v_global < tv_global) { // ??? Why need v < tv !!! Because it's a undirected graph. 
tmp_sibling_es[offset_tmp_q + size_tmp_sibling_es].first = v_global; tmp_sibling_es[offset_tmp_q + size_tmp_sibling_es].second = tv_global; ++size_tmp_sibling_es; // sibling_es[num_sibling_es].first = v_global; // sibling_es[num_sibling_es].second = tv_global; // ++num_sibling_es; } } else { // iter < dists[tv] if (dists[tv_local] == MAX_UNWEIGHTED_DIST) { if (CAS(dists.data() + tv_local, MAX_UNWEIGHTED_DIST, td)) { tmp_q[offset_tmp_q + size_tmp_q++] = tv_global; } } // if (dists[tv_local] == MAX_UNWEIGHTED_DIST) { // tmp_que[end_tmp_que++] = tv_global; // dists[tv_local] = td; // } tmp_child_es[offset_tmp_q + size_tmp_child_es].first = v_global; tmp_child_es[offset_tmp_q + size_tmp_child_es].second = tv_global; ++size_tmp_child_es; // child_es[num_child_es].first = v_global; // child_es[num_child_es].second = tv_global; // ++num_child_es; } } } template <VertexID BATCH_SIZE> inline void DistBVCPLL<BATCH_SIZE>:: bit_parallel_labeling( const DistGraph &G, // std::vector<IndexType> &L, std::vector<uint8_t> &used_bp_roots) { // Class type of Bit-Parallel label message unit. struct MsgUnitBP { VertexID v_global; uint64_t S_n1; uint64_t S_0; MsgUnitBP() = default; // MsgUnitBP(MsgUnitBP&& other) = default; // MsgUnitBP(MsgUnitBP& other) = default; // MsgUnitBP& operator=(const MsgUnitBP& other) = default; // MsgUnitBP& operator=(MsgUnitBP&& other) = default; MsgUnitBP(VertexID v, uint64_t sn1, uint64_t s0) : v_global(v), S_n1(sn1), S_0(s0) { } }; // VertexID num_v = G.num_v; // EdgeID num_e = G.num_e; EdgeID local_num_edges = G.num_edges_local; std::vector<UnweightedDist> tmp_d(num_masters); // distances from the root to every v std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0} std::vector<VertexID> que(num_masters); // active queue VertexID end_que = 0; std::vector<VertexID> tmp_que(num_masters); // temporary queue, to be swapped with que VertexID end_tmp_que = 0; std::vector<std::pair<VertexID, VertexID> > sibling_es(local_num_edges); // siblings, their distances to the root are equal (have difference of 0) std::vector<std::pair<VertexID, VertexID> > child_es(local_num_edges); // child and father, their distances to the root have difference of 1. VertexID r_global = 0; // root r for (VertexID i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) { // {// test // if (0 == host_id) { // printf("i_bpsp: %u\n", i_bpspt); // } // } // Select the root r_global if (0 == host_id) { while (r_global < num_v && used_bp_roots[r_global]) { ++r_global; } if (r_global == num_v) { for (VertexID v = 0; v < num_v; ++v) { L[v].bp_dist[i_bpspt] = MAX_UNWEIGHTED_DIST; } continue; } } // Broadcast the r here. // message_time -= WallTimer::get_time_mark(); MPI_Bcast(&r_global, 1, V_ID_Type, 0, MPI_COMM_WORLD); // message_time += WallTimer::get_time_mark(); used_bp_roots[r_global] = 1; //#ifdef DEBUG_MESSAGES_ON // {//test // if (0 == host_id) { // printf("r_global: %u i_bpspt: %u\n", r_global, i_bpspt); // } // } //#endif // VertexID que_t0 = 0, que_t1 = 0, que_h = 0; fill(tmp_d.begin(), tmp_d.end(), MAX_UNWEIGHTED_DIST); fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0)); // Mark the r_global if (G.get_master_host_id(r_global) == host_id) { tmp_d[G.get_local_vertex_id(r_global)] = 0; que[end_que++] = r_global; } // Select the r_global's 64 neighbors { // Get r_global's neighbors into buffer_send, rank from high to low. 
VertexID local_degree = G.local_out_degrees[r_global]; std::vector<VertexID> buffer_send(local_degree); if (local_degree) { EdgeID e_i_start = G.vertices_idx[r_global] + local_degree - 1; for (VertexID d_i = 0; d_i < local_degree; ++d_i) { EdgeID e_i = e_i_start - d_i; buffer_send[d_i] = G.out_edges[e_i]; } } // Get selected neighbors (up to 64) std::vector<VertexID> selected_nbrs; if (0 != host_id) { // Every host other than 0 sends neighbors to host 0 // message_time -= WallTimer::get_time_mark(); MPI_Instance::send_buffer_2_dst(buffer_send, 0, SENDING_ROOT_NEIGHBORS, SENDING_SIZE_ROOT_NEIGHBORS); // Receive selected neighbors from host 0 MPI_Instance::recv_buffer_from_src(selected_nbrs, 0, SENDING_SELECTED_NEIGHBORS, SENDING_SIZE_SELETED_NEIGHBORS); // message_time += WallTimer::get_time_mark(); } else { // Host 0 // Host 0 receives neighbors from others std::vector<VertexID> all_nbrs(buffer_send); std::vector<VertexID > buffer_recv; for (int loc = 0; loc < num_hosts - 1; ++loc) { // message_time -= WallTimer::get_time_mark(); MPI_Instance::recv_buffer_from_any(buffer_recv, SENDING_ROOT_NEIGHBORS, SENDING_SIZE_ROOT_NEIGHBORS); // message_time += WallTimer::get_time_mark(); if (buffer_recv.empty()) { continue; } buffer_send.resize(buffer_send.size() + buffer_recv.size()); std::merge(buffer_recv.begin(), buffer_recv.end(), all_nbrs.begin(), all_nbrs.end(), buffer_send.begin()); all_nbrs.resize(buffer_send.size()); all_nbrs.assign(buffer_send.begin(), buffer_send.end()); } assert(all_nbrs.size() == G.get_global_out_degree(r_global)); // Select 64 (or less) neighbors VertexID ns = 0; // number of selected neighbor, default 64 for (VertexID v_global : all_nbrs) { if (used_bp_roots[v_global]) { continue; } used_bp_roots[v_global] = 1; selected_nbrs.push_back(v_global); if (++ns == 64) { break; } } // Send selected neighbors to other hosts // message_time -= WallTimer::get_time_mark(); for (int dest = 1; dest < num_hosts; ++dest) { MPI_Instance::send_buffer_2_dst(selected_nbrs, dest, SENDING_SELECTED_NEIGHBORS, SENDING_SIZE_SELETED_NEIGHBORS); } // message_time += WallTimer::get_time_mark(); } // {//test // printf("host_id: %u selected_nbrs.size(): %lu\n", host_id, selected_nbrs.size()); // } // Synchronize the used_bp_roots. 
for (VertexID v_global : selected_nbrs) { used_bp_roots[v_global] = 1; } // Mark selected neighbors for (VertexID v_i = 0; v_i < selected_nbrs.size(); ++v_i) { VertexID v_global = selected_nbrs[v_i]; if (host_id != G.get_master_host_id(v_global)) { continue; } tmp_que[end_tmp_que++] = v_global; tmp_d[G.get_local_vertex_id(v_global)] = 1; tmp_s[v_global].first = 1ULL << v_i; } } // Reduce the global number of active vertices VertexID global_num_actives = 1; UnweightedDist d = 0; while (global_num_actives) { //#ifdef DEBUG_MESSAGES_ON // {//test // if (0 == host_id) { // printf("d: %u que_size: %u\n", d, global_num_actives); // } // } //#endif // for (UnweightedDist d = 0; que_t0 < que_h; ++d) { VertexID num_sibling_es = 0, num_child_es = 0; // Send active masters to mirrors { std::vector<MsgUnitBP> buffer_send(end_que); for (VertexID que_i = 0; que_i < end_que; ++que_i) { VertexID v_global = que[que_i]; buffer_send[que_i] = MsgUnitBP(v_global, tmp_s[v_global].first, tmp_s[v_global].second); } // {// test // printf("host_id: %u buffer_send.size(): %lu\n", host_id, buffer_send.size()); // } for (int root = 0; root < num_hosts; ++root) { std::vector<MsgUnitBP> buffer_recv; one_host_bcasts_buffer_to_buffer(root, buffer_send, buffer_recv); if (buffer_recv.empty()) { continue; } // For parallel adding to queue VertexID size_buffer_recv = buffer_recv.size(); std::vector<VertexID> offsets_tmp_q(size_buffer_recv); #pragma omp parallel for for (VertexID i_q = 0; i_q < size_buffer_recv; ++i_q) { offsets_tmp_q[i_q] = G.local_out_degrees[buffer_recv[i_q].v_global]; } VertexID num_neighbors = PADO::prefix_sum_for_offsets(offsets_tmp_q); std::vector<VertexID> tmp_q(num_neighbors); std::vector<VertexID> sizes_tmp_q(size_buffer_recv, 0); // For parallel adding to sibling_es std::vector< std::pair<VertexID, VertexID> > tmp_sibling_es(num_neighbors); std::vector<VertexID> sizes_tmp_sibling_es(size_buffer_recv, 0); // For parallel adding to child_es std::vector< std::pair<VertexID, VertexID> > tmp_child_es(num_neighbors); std::vector<VertexID> sizes_tmp_child_es(size_buffer_recv, 0); #pragma omp parallel for // for (const MsgUnitBP &m : buffer_recv) { for (VertexID i_m = 0; i_m < size_buffer_recv; ++i_m) { const MsgUnitBP &m = buffer_recv[i_m]; VertexID v_global = m.v_global; if (!G.local_out_degrees[v_global]) { continue; } tmp_s[v_global].first = m.S_n1; tmp_s[v_global].second = m.S_0; // Push labels bit_parallel_push_labels( G, v_global, tmp_q, sizes_tmp_q[i_m], tmp_sibling_es, sizes_tmp_sibling_es[i_m], tmp_child_es, sizes_tmp_child_es[i_m], offsets_tmp_q[i_m], // tmp_que, // end_tmp_que, // sibling_es, // num_sibling_es, // child_es, // num_child_es, tmp_d, d); } {// From tmp_sibling_es to sibling_es idi total_size_tmp = PADO::prefix_sum_for_offsets(sizes_tmp_sibling_es); PADO::collect_into_queue( tmp_sibling_es, offsets_tmp_q, sizes_tmp_sibling_es, total_size_tmp, sibling_es, num_sibling_es); } {// From tmp_child_es to child_es idi total_size_tmp = PADO::prefix_sum_for_offsets(sizes_tmp_child_es); PADO::collect_into_queue( tmp_child_es, offsets_tmp_q, sizes_tmp_child_es, total_size_tmp, child_es, num_child_es); } {// From tmp_q to tmp_que idi total_size_tmp = PADO::prefix_sum_for_offsets(sizes_tmp_q); PADO::collect_into_queue( tmp_q, offsets_tmp_q, sizes_tmp_q, total_size_tmp, tmp_que, end_tmp_que); } // {// test // printf("host_id: %u root: %u done push.\n", host_id, root); // } } } // Update the sets in tmp_s { #pragma omp parallel for for (VertexID i = 0; i < num_sibling_es; ++i) { VertexID v = 
sibling_es[i].first, w = sibling_es[i].second;
                        // Sibling edges: v and w sit on the same BFS level, so each records the other in its S_r^{0} set.
                        __atomic_or_fetch(&tmp_s[v].second, tmp_s[w].first, __ATOMIC_SEQ_CST);
                        __atomic_or_fetch(&tmp_s[w].second, tmp_s[v].first, __ATOMIC_SEQ_CST);
                        // The OR-ed results need to be sent back to the other hosts (done below).
                    }
                    // Put into the buffer sending to others
                    std::vector< std::pair<VertexID, uint64_t> > buffer_send(2 * num_sibling_es);
#pragma omp parallel for
                    for (VertexID i = 0; i < num_sibling_es; ++i) {
                        VertexID v = sibling_es[i].first;
                        VertexID w = sibling_es[i].second;
                        buffer_send[2 * i] = std::make_pair(v, tmp_s[v].second);
                        buffer_send[2 * i + 1] = std::make_pair(w, tmp_s[w].second);
                    }
                    // Send the messages
                    for (int root = 0; root < num_hosts; ++root) {
                        std::vector< std::pair<VertexID, uint64_t> > buffer_recv;
                        one_host_bcasts_buffer_to_buffer(root,
                                buffer_send,
                                buffer_recv);
                        if (buffer_recv.empty()) {
                            continue;
                        }
                        size_t i_m_bound = buffer_recv.size();
#pragma omp parallel for
                        for (size_t i_m = 0; i_m < i_m_bound; ++i_m) {
                            const auto &m = buffer_recv[i_m];
                            __atomic_or_fetch(&tmp_s[m.first].second, m.second, __ATOMIC_SEQ_CST);
                        }
                    }
                    // Child edges: a child c inherits both of its parent v's sets.
#pragma omp parallel for
                    for (VertexID i = 0; i < num_child_es; ++i) {
                        VertexID v = child_es[i].first, c = child_es[i].second;
                        __atomic_or_fetch(&tmp_s[c].first, tmp_s[v].first, __ATOMIC_SEQ_CST);
                        __atomic_or_fetch(&tmp_s[c].second, tmp_s[v].second, __ATOMIC_SEQ_CST);
                    }
                }

                // Swap que and tmp_que
                tmp_que.swap(que);
                end_que = end_tmp_que;
                end_tmp_que = 0;
                MPI_Allreduce(&end_que,
                        &global_num_actives,
                        1,
                        V_ID_Type,
                        MPI_SUM,
                        MPI_COMM_WORLD);
                ++d;
            }

            // Store this root's finished BFS into the bit-parallel labels.
#pragma omp parallel for
            for (VertexID v_local = 0; v_local < num_masters; ++v_local) {
                VertexID v_global = G.get_global_vertex_id(v_local);
                L[v_local].bp_dist[i_bpspt] = tmp_d[v_local];
                L[v_local].bp_sets[i_bpspt][0] = tmp_s[v_global].first; // S_r^{-1}
                L[v_local].bp_sets[i_bpspt][1] = tmp_s[v_global].second & ~tmp_s[v_global].first; // Only need those r's neighbors who are not already in S_r^{-1}
            }
        }
    }
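// How the bit-parallel labels prune distance queries (used by the checks in
// local_push_labels_para() and local_push_labels_seq() below): for a BP root r,
// bp_dist[i] stores d(r, v), bp_sets[i][0] stores S_r^{-1}(v) (which of r's 64
// selected neighbors lie on a path of length d(r, v) - 1 to v), and
// bp_sets[i][1] stores S_r^{0}(v) (those on a path of length d(r, v)). For two
// vertices u and w, d(u, w) <= d(r, u) + d(r, w); the bound improves by 2 when
// S_r^{-1}(u) & S_r^{-1}(w) != 0 (both BFS branches pass through a common
// selected neighbor of r), and by 1 when
// (S_r^{-1}(u) & S_r^{0}(w)) | (S_r^{0}(u) & S_r^{-1}(w)) != 0.
// This is exactly the "td - 2 / td - 1" correction in the pruning loops below.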
// Function for initializing at the beginning of a batch:
// for a batch, initialize the temporary labels and real labels of roots;
// traverse roots' labels to initialize the distance buffer;
// unset flag arrays is_active and got_labels
template <VertexID BATCH_SIZE>
inline VertexID DistBVCPLL<BATCH_SIZE>::
initialization(
        const DistGraph &G,
        std::vector<ShortIndex> &short_index,
        std::vector< std::vector<UnweightedDist> > &dist_table,
        std::vector< std::vector<VertexID> > &recved_dist_table,
        std::vector<BPLabelType> &bp_labels_table,
        std::vector<VertexID> &active_queue,
        VertexID &end_active_queue,
        std::vector<VertexID> &once_candidated_queue,
        VertexID &end_once_candidated_queue,
        std::vector<uint8_t> &once_candidated,
//        VertexID b_id,
        VertexID roots_start,
        VertexID roots_size,
//        std::vector<VertexID> &roots_master_local,
        const std::vector<uint8_t> &used_bp_roots)
{
    // Get the roots_master_local, containing all local roots.
    std::vector<VertexID> roots_master_local;
    VertexID roots_bound = roots_start + roots_size;
    for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) {
        if (G.get_master_host_id(r_global) == host_id && !used_bp_roots[r_global]) {
            roots_master_local.push_back(G.get_local_vertex_id(r_global));
        }
    }
    VertexID size_roots_master_local = roots_master_local.size();

    // Short_index
    {
        if (end_once_candidated_queue >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
            for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) {
                VertexID v_local = once_candidated_queue[v_i];
                short_index[v_local].indicator_reset();
                once_candidated[v_local] = 0;
            }
        } else {
            for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) {
                VertexID v_local = once_candidated_queue[v_i];
                short_index[v_local].indicator_reset();
                once_candidated[v_local] = 0;
            }
        }
        end_once_candidated_queue = 0;
        if (size_roots_master_local >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
            for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
                VertexID r_local = roots_master_local[i_r];
                short_index[r_local].indicator[G.get_global_vertex_id(r_local) - roots_start] = 1; // v itself
//                short_index[r_local].indicator[BATCH_SIZE] = 1; // v got labels
            }
        } else {
            for (VertexID r_local : roots_master_local) {
                short_index[r_local].indicator[G.get_global_vertex_id(r_local) - roots_start] = 1; // v itself
//                short_index[r_local].indicator[BATCH_SIZE] = 1; // v got labels
            }
        }
    }

    // Real Index
    {
        if (size_roots_master_local >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
            for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
                VertexID r_local = roots_master_local[i_r];
                IndexType &Lr = L[r_local];
                Lr.distances.emplace_back(
                        Lr.vertices.size(), // start_index
                        1, // size
                        0); // dist
                Lr.vertices.push_back(G.get_global_vertex_id(r_local));
            }
        } else {
            for (VertexID r_local : roots_master_local) {
                IndexType &Lr = L[r_local];
                Lr.distances.emplace_back(
                        Lr.vertices.size(), // start_index
                        1, // size
                        0); // dist
                Lr.vertices.push_back(G.get_global_vertex_id(r_local));
            }
        }
    }

    // Dist Table
    {
//        struct LabelTableUnit {
//            VertexID root_id;
//            VertexID label_global_id;
//            UnweightedDist dist;
//
//            LabelTableUnit() = default;
//
//            LabelTableUnit(VertexID r, VertexID l, UnweightedDist d) :
//                    root_id(r), label_global_id(l), dist(d) {}
//        };
        std::vector<LabelTableUnit> buffer_send; // buffer for sending
        // Dist_matrix
        {
            // Unpack the IndexType structure into flat (root, label, distance) units before sending.
            if (size_roots_master_local >= THRESHOLD_PARALLEL) {
                // Offsets for adding labels to buffer_send in parallel
                std::vector<VertexID> offsets_buffer_send(size_roots_master_local);
#pragma omp parallel for
                for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
                    VertexID r_local = roots_master_local[i_r];
                    offsets_buffer_send[i_r] = L[r_local].vertices.size();
                }
                EdgeID size_labels = PADO::prefix_sum_for_offsets(offsets_buffer_send);
                buffer_send.resize(size_labels);
#pragma omp parallel for
                for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
                    VertexID r_local = roots_master_local[i_r];
                    VertexID top_location = 0;
                    IndexType &Lr = L[r_local];
                    VertexID r_root_id = G.get_global_vertex_id(r_local) - roots_start;
                    _mm_prefetch(&Lr.distances[0], _MM_HINT_T0);
                    _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0);
                    // Traverse distances array
                    VertexID dist_bound_index = Lr.distances.size();
                    for (VertexID dist_i = 0; dist_i < dist_bound_index; ++dist_i) {
                        VertexID v_start_index = Lr.distances[dist_i].start_index;
                        VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size;
                        UnweightedDist dist = Lr.distances[dist_i].dist;
                        // Traverse vertices array
                        for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) {
                            // Write into the dist_table
                            buffer_send[offsets_buffer_send[i_r] + top_location++] =
                                    LabelTableUnit(r_root_id, Lr.vertices[v_i], dist);
                        }
                    }
                }
            } else {
                for (VertexID r_local : roots_master_local) { // The distance table.
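                    // Sequential fallback: flatten r_local's labels into (root, label,
                    // dist) units one by one; same layout as the parallel branch above.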
IndexType &Lr = L[r_local]; VertexID r_root_id = G.get_global_vertex_id(r_local) - roots_start; // VertexID b_i_bound = Lr.batches.size(); // _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); // Traverse batches array // for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) { // VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; // VertexID dist_start_index = Lr.batches[b_i].start_index; // VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size; // Traverse distances array // for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { VertexID dist_bound_index = Lr.distances.size(); for (VertexID dist_i = 0; dist_i < dist_bound_index; ++dist_i) { VertexID v_start_index = Lr.distances[dist_i].start_index; VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size; UnweightedDist dist = Lr.distances[dist_i].dist; // Traverse vertices array for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) { // Write into the dist_table buffer_send.emplace_back(r_root_id, Lr.vertices[v_i], dist); // buffer for sending // buffer_send.emplace_back(r_root_id, Lr.vertices[v_i] + id_offset, // dist); // buffer for sending } } // } } } } // Broadcast local roots labels for (int root = 0; root < num_hosts; ++root) { std::vector<LabelTableUnit> buffer_recv; one_host_bcasts_buffer_to_buffer(root, buffer_send, buffer_recv); if (buffer_recv.empty()) { continue; } EdgeID size_buffer_recv = buffer_recv.size(); if (size_buffer_recv >= THRESHOLD_PARALLEL) { std::vector<VertexID> sizes_recved_root_labels(roots_size, 0); #pragma omp parallel for for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) { const LabelTableUnit &l = buffer_recv[i_l]; VertexID root_id = l.root_id; VertexID label_global_id = l.label_global_id; UnweightedDist dist = l.dist; dist_table[root_id][label_global_id] = dist; // Record root_id's number of its received label, for later adding to recved_dist_table __atomic_add_fetch(sizes_recved_root_labels.data() + root_id, 1, __ATOMIC_SEQ_CST); // recved_dist_table[root_id].push_back(label_global_id); } // Record the received label in recved_dist_table, for later reset #pragma omp parallel for for (VertexID root_id = 0; root_id < roots_size; ++root_id) { VertexID &size = sizes_recved_root_labels[root_id]; if (size) { recved_dist_table[root_id].resize(size); size = 0; } } #pragma omp parallel for for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) { const LabelTableUnit &l = buffer_recv[i_l]; VertexID root_id = l.root_id; VertexID label_global_id = l.label_global_id; PADO::TS_enqueue(recved_dist_table[root_id], sizes_recved_root_labels[root_id], label_global_id); } } else { for (const LabelTableUnit &l : buffer_recv) { VertexID root_id = l.root_id; VertexID label_global_id = l.label_global_id; UnweightedDist dist = l.dist; dist_table[root_id][label_global_id] = dist; // Record the received label in recved_dist_table, for later reset recved_dist_table[root_id].push_back(label_global_id); } } } } // Build the Bit-Parallel Labels Table { // struct MsgBPLabel { // VertexID r_root_id; // UnweightedDist bp_dist[BITPARALLEL_SIZE]; // uint64_t bp_sets[BITPARALLEL_SIZE][2]; // // MsgBPLabel() = default; // MsgBPLabel(VertexID r, const UnweightedDist dist[], const uint64_t sets[][2]) // : r_root_id(r) // { // memcpy(bp_dist, dist, sizeof(bp_dist)); // memcpy(bp_sets, sets, sizeof(bp_sets)); // } // }; // std::vector<MPI_Request> requests_send(num_hosts - 1); std::vector<MsgBPLabel> 
buffer_send; std::vector<VertexID> roots_queue; for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) { if (G.get_master_host_id(r_global) != host_id) { continue; } roots_queue.push_back(r_global); } VertexID size_roots_queue = roots_queue.size(); if (size_roots_queue >= THRESHOLD_PARALLEL) { buffer_send.resize(size_roots_queue); #pragma omp parallel for for (VertexID i_r = 0; i_r < size_roots_queue; ++i_r) { VertexID r_global = roots_queue[i_r]; VertexID r_local = G.get_local_vertex_id(r_global); VertexID r_root = r_global - roots_start; // Prepare for sending // buffer_send.emplace_back(r_root, L[r_local].bp_dist, L[r_local].bp_sets); buffer_send[i_r] = MsgBPLabel(r_root, L[r_local].bp_dist, L[r_local].bp_sets); } } else { // for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) { // if (G.get_master_host_id(r_global) != host_id) { // continue; // } for (VertexID r_global : roots_queue) { VertexID r_local = G.get_local_vertex_id(r_global); VertexID r_root = r_global - roots_start; // Local roots // memcpy(bp_labels_table[r_root].bp_dist, L[r_local].bp_dist, sizeof(bp_labels_table[r_root].bp_dist)); // memcpy(bp_labels_table[r_root].bp_sets, L[r_local].bp_sets, sizeof(bp_labels_table[r_root].bp_sets)); // Prepare for sending buffer_send.emplace_back(r_root, L[r_local].bp_dist, L[r_local].bp_sets); } } for (int root = 0; root < num_hosts; ++root) { std::vector<MsgBPLabel> buffer_recv; one_host_bcasts_buffer_to_buffer(root, buffer_send, buffer_recv); if (buffer_recv.empty()) { continue; } VertexID size_buffer_recv = buffer_recv.size(); if (size_buffer_recv >= THRESHOLD_PARALLEL) { #pragma omp parallel for for (VertexID i_m = 0; i_m < size_buffer_recv; ++i_m) { const MsgBPLabel &m = buffer_recv[i_m]; VertexID r_root = m.r_root_id; memcpy(bp_labels_table[r_root].bp_dist, m.bp_dist, sizeof(bp_labels_table[r_root].bp_dist)); memcpy(bp_labels_table[r_root].bp_sets, m.bp_sets, sizeof(bp_labels_table[r_root].bp_sets)); } } else { for (const MsgBPLabel &m : buffer_recv) { VertexID r_root = m.r_root_id; memcpy(bp_labels_table[r_root].bp_dist, m.bp_dist, sizeof(bp_labels_table[r_root].bp_dist)); memcpy(bp_labels_table[r_root].bp_sets, m.bp_sets, sizeof(bp_labels_table[r_root].bp_sets)); } } } } // Active_queue VertexID global_num_actives = 0; // global number of active vertices. 
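    // Each host enqueues its local root masters, then an MPI_Allreduce sums the
    // per-host queue sizes so that every host agrees on the global number of
    // active vertices, which is what this function returns.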
    {
        if (size_roots_master_local >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
            for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
                VertexID r_local = roots_master_local[i_r];
                active_queue[i_r] = r_local;
            }
            end_active_queue = size_roots_master_local;
        } else {
            for (VertexID r_local : roots_master_local) {
                active_queue[end_active_queue++] = r_local;
            }
        }
        // Get the global number of active vertices.
        MPI_Allreduce(&end_active_queue,
                &global_num_actives,
                1,
                V_ID_Type,
                MPI_SUM,
                MPI_COMM_WORLD);
    }

    return global_num_actives;
}
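// A note on the parallel-enqueue idiom used throughout this file
// (PADO::prefix_sum_for_offsets() + PADO::collect_into_queue()): judging by the
// call sites here, prefix_sum_for_offsets() turns a vector of per-item counts
// into exclusive prefix offsets in place and returns the total, e.g.
//     sizes {2, 0, 3}  ->  offsets {0, 2, 2}, returned total 5,
// so each OpenMP worker writes its output into a private, non-overlapping slice
// of a temporary array; collect_into_queue() then compacts those slices (given
// the offsets and the actually-written sizes) into the destination queue and
// advances its end index, avoiding contention on a single shared queue tail.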
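// Function: schedule one round of label pushing. Each host packs its active
// masters' newly inserted labels into flat buffers (buffer_send_indices and
// buffer_send_labels), broadcasts them host by host, and every receiver pushes
// the labels to the local neighbors of each received vertex in parallel (see
// local_push_labels_para() below). global_start, global_size, and local_size
// select the slice of active_queue processed by this call.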
template<VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
schedule_label_pushing_para(
        const DistGraph &G,
        const VertexID roots_start,
        const std::vector<uint8_t> &used_bp_roots,
        const std::vector<VertexID> &active_queue,
        const VertexID global_start,
        const VertexID global_size,
        const VertexID local_size,
//        const VertexID start_active_queue,
//        const VertexID size_active_queue,
        std::vector<VertexID> &got_candidates_queue,
        VertexID &end_got_candidates_queue,
        std::vector<ShortIndex> &short_index,
        const std::vector<BPLabelType> &bp_labels_table,
        std::vector<uint8_t> &got_candidates,
        std::vector<uint8_t> &is_active,
        std::vector<VertexID> &once_candidated_queue,
        VertexID &end_once_candidated_queue,
        std::vector<uint8_t> &once_candidated,
        const UnweightedDist iter)
{
    std::vector<std::pair<VertexID, VertexID> > buffer_send_indices;
        //.first: Vertex ID
        //.second: size of labels
    std::vector<VertexID> buffer_send_labels;
    if (local_size) {
        const VertexID start_active_queue = global_start;
        const VertexID size_active_queue = global_size <= local_size ? global_size : local_size;
        const VertexID bound_active_queue = start_active_queue + size_active_queue;
        buffer_send_indices.resize(size_active_queue);
        // Prepare offset for inserting
        std::vector<VertexID> offsets_buffer_locs(size_active_queue);
#pragma omp parallel for
        for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) {
            VertexID v_head_local = active_queue[i_q];
            is_active[v_head_local] = 0; // reset is_active
            const IndexType &Lv = L[v_head_local];
            offsets_buffer_locs[i_q - start_active_queue] = Lv.distances.rbegin()->size;
        }
        EdgeID size_buffer_send_labels = PADO::prefix_sum_for_offsets(offsets_buffer_locs);
        buffer_send_labels.resize(size_buffer_send_labels);
        // Build buffer_send_labels by parallel inserting
#pragma omp parallel for
        for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) {
            VertexID v_head_local = active_queue[i_q];
            is_active[v_head_local] = 0; // reset is_active
            VertexID v_head_global = G.get_global_vertex_id(v_head_local);
            const IndexType &Lv = L[v_head_local];
            // Prepare the buffer_send_indices
            VertexID tmp_i_q = i_q - start_active_queue;
            buffer_send_indices[tmp_i_q] = std::make_pair(v_head_global, Lv.distances.rbegin()->size);
            // These 2 indices are used for traversing v_head's last inserted labels
            VertexID l_i_start = Lv.distances.rbegin()->start_index;
            VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size;
            VertexID top_labels = offsets_buffer_locs[tmp_i_q];
            for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) {
                VertexID label_root_id = Lv.vertices[l_i] - roots_start;
                buffer_send_labels[top_labels++] = label_root_id;
            }
        }
    }

    for (int root = 0; root < num_hosts; ++root) {
        // Get the indices
        std::vector<std::pair<VertexID, VertexID> > indices_buffer;
        one_host_bcasts_buffer_to_buffer(root,
                buffer_send_indices,
                indices_buffer);
        if (indices_buffer.empty()) {
            continue;
        }
        // Get the labels
        std::vector<VertexID> labels_buffer;
        one_host_bcasts_buffer_to_buffer(root,
                buffer_send_labels,
                labels_buffer);
        VertexID size_indices_buffer = indices_buffer.size();
        // Prepare the offsets for reading indices_buffer
        std::vector<EdgeID> starts_locs_index(size_indices_buffer);
#pragma omp parallel for
        for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) {
            const std::pair<VertexID, VertexID> &e = indices_buffer[i_i];
            starts_locs_index[i_i] = e.second;
        }
        EdgeID total_recved_labels = PADO::prefix_sum_for_offsets(starts_locs_index);
        // Prepare the offsets for inserting v_tails into queue
        std::vector<VertexID> offsets_tmp_queue(size_indices_buffer);
#pragma omp parallel for
        for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) {
            const std::pair<VertexID, VertexID> &e = indices_buffer[i_i];
            offsets_tmp_queue[i_i] = G.local_out_degrees[e.first];
        }
        EdgeID num_ngbrs = PADO::prefix_sum_for_offsets(offsets_tmp_queue);
        std::vector<VertexID> tmp_got_candidates_queue(num_ngbrs);
        std::vector<VertexID> sizes_tmp_got_candidates_queue(size_indices_buffer, 0);
        std::vector<VertexID> tmp_once_candidated_queue(num_ngbrs);
        std::vector<VertexID> sizes_tmp_once_candidated_queue(size_indices_buffer, 0);
#pragma omp parallel for
        for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) {
            VertexID v_head_global = indices_buffer[i_i].first;
            EdgeID start_index = starts_locs_index[i_i];
            EdgeID bound_index = i_i != size_indices_buffer - 1 ?
starts_locs_index[i_i + 1] : total_recved_labels; if (G.local_out_degrees[v_head_global]) { local_push_labels_para( v_head_global, start_index, bound_index, roots_start, labels_buffer, G, short_index, // std::vector<VertexID> &got_candidates_queue, // VertexID &end_got_candidates_queue, tmp_got_candidates_queue, sizes_tmp_got_candidates_queue[i_i], offsets_tmp_queue[i_i], got_candidates, // std::vector<VertexID> &once_candidated_queue, // VertexID &end_once_candidated_queue, tmp_once_candidated_queue, sizes_tmp_once_candidated_queue[i_i], once_candidated, bp_labels_table, used_bp_roots, iter); } } {// Collect elements from tmp_got_candidates_queue to got_candidates_queue VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_got_candidates_queue); PADO::collect_into_queue( tmp_got_candidates_queue, offsets_tmp_queue, // the locations for reading tmp_got_candidate_queue sizes_tmp_got_candidates_queue, // the locations for writing got_candidate_queue total_new, got_candidates_queue, end_got_candidates_queue); } {// Collect elements from tmp_once_candidated_queue to once_candidated_queue VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_once_candidated_queue); PADO::collect_into_queue( tmp_once_candidated_queue, offsets_tmp_queue, // the locations for reading tmp_once_candidats_queue sizes_tmp_once_candidated_queue, // the locations for writing once_candidated_queue total_new, once_candidated_queue, end_once_candidated_queue); } } } // Function: pushes v_head's labels to v_head's every (master) neighbor template <VertexID BATCH_SIZE> inline void DistBVCPLL<BATCH_SIZE>:: local_push_labels_para( const VertexID v_head_global, const EdgeID start_index, const EdgeID bound_index, const VertexID roots_start, const std::vector<VertexID> &labels_buffer, const DistGraph &G, std::vector<ShortIndex> &short_index, // std::vector<VertexID> &got_candidates_queue, // VertexID &end_got_candidates_queue, std::vector<VertexID> &tmp_got_candidates_queue, VertexID &size_tmp_got_candidates_queue, const VertexID offset_tmp_queue, std::vector<uint8_t> &got_candidates, // std::vector<VertexID> &once_candidated_queue, // VertexID &end_once_candidated_queue, std::vector<VertexID> &tmp_once_candidated_queue, VertexID &size_tmp_once_candidated_queue, std::vector<uint8_t> &once_candidated, const std::vector<BPLabelType> &bp_labels_table, const std::vector<uint8_t> &used_bp_roots, const UnweightedDist iter) { // Traverse v_head's every neighbor v_tail EdgeID e_i_start = G.vertices_idx[v_head_global]; EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global]; for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) { VertexID v_tail_global = G.out_edges[e_i]; if (used_bp_roots[v_tail_global]) { continue; } if (v_tail_global < roots_start) { // v_tail_global has higher rank than any roots, then no roots can push new labels to it. 
return; } VertexID v_tail_local = G.get_local_vertex_id(v_tail_global); const IndexType &L_tail = L[v_tail_local]; ShortIndex &SI_v_tail = short_index[v_tail_local]; // Traverse v_head's last inserted labels for (VertexID l_i = start_index; l_i < bound_index; ++l_i) { VertexID label_root_id = labels_buffer[l_i]; VertexID label_global_id = label_root_id + roots_start; if (v_tail_global <= label_global_id) { // v_tail_global has higher rank than the label continue; } // if (SI_v_tail.indicator[label_root_id]) { // // The label is already selected before // continue; // } // // Record label_root_id as once selected by v_tail_global // SI_v_tail.indicator[label_root_id] = 1; {// Deal with race condition if (!PADO::CAS(SI_v_tail.indicator.data() + label_root_id, static_cast<uint8_t>(0), static_cast<uint8_t>(1))) { // The label is already selected before continue; } } // Add into once_candidated_queue if (!once_candidated[v_tail_local]) { // If v_tail_global is not in the once_candidated_queue yet, add it in if (PADO::CAS(once_candidated.data() + v_tail_local, static_cast<uint8_t>(0), static_cast<uint8_t>(1))) { tmp_once_candidated_queue[offset_tmp_queue + size_tmp_once_candidated_queue++] = v_tail_local; } // once_candidated[v_tail_local] = 1; // once_candidated_queue[end_once_candidated_queue++] = v_tail_local; } // Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already // const IndexType &L_label = L[label_global_id]; // _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0); // _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0); const BPLabelType &L_label = bp_labels_table[label_root_id]; bool no_need_add = false; for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) { VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i]; if (td - 2 <= iter) { td += (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 : ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) | (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0])) ? 
-1 : 0; if (td <= iter) { no_need_add = true; break; } } } if (no_need_add) { continue; } // if (SI_v_tail.is_candidate[label_root_id]) { // continue; // } // SI_v_tail.is_candidate[label_root_id] = 1; // SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id; if (!SI_v_tail.is_candidate[label_root_id]) { if (CAS(SI_v_tail.is_candidate.data() + label_root_id, static_cast<uint8_t>(0), static_cast<uint8_t>(1))) { PADO::TS_enqueue(SI_v_tail.candidates_que, SI_v_tail.end_candidates_que, label_root_id); } } // Add into got_candidates queue // if (!got_candidates[v_tail_local]) { // // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate) // got_candidates[v_tail_local] = 1; // got_candidates_queue[end_got_candidates_queue++] = v_tail_local; // } if (!got_candidates[v_tail_local]) { if (CAS(got_candidates.data() + v_tail_local, static_cast<uint8_t>(0), static_cast<uint8_t>(1))) { tmp_got_candidates_queue[offset_tmp_queue + size_tmp_got_candidates_queue++] = v_tail_local; } } } } // { // assert(iter >= iter); // } } // Function: pushes v_head's labels to v_head's every (master) neighbor template <VertexID BATCH_SIZE> inline void DistBVCPLL<BATCH_SIZE>:: local_push_labels_seq( VertexID v_head_global, EdgeID start_index, EdgeID bound_index, VertexID roots_start, const std::vector<VertexID> &labels_buffer, const DistGraph &G, std::vector<ShortIndex> &short_index, std::vector<VertexID> &got_candidates_queue, VertexID &end_got_candidates_queue, std::vector<uint8_t> &got_candidates, std::vector<VertexID> &once_candidated_queue, VertexID &end_once_candidated_queue, std::vector<uint8_t> &once_candidated, const std::vector<BPLabelType> &bp_labels_table, const std::vector<uint8_t> &used_bp_roots, const UnweightedDist iter) { // Traverse v_head's every neighbor v_tail EdgeID e_i_start = G.vertices_idx[v_head_global]; EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global]; for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) { VertexID v_tail_global = G.out_edges[e_i]; if (used_bp_roots[v_tail_global]) { continue; } if (v_tail_global < roots_start) { // v_tail_global has higher rank than any roots, then no roots can push new labels to it. 
return; } // Traverse v_head's last inserted labels for (VertexID l_i = start_index; l_i < bound_index; ++l_i) { VertexID label_root_id = labels_buffer[l_i]; VertexID label_global_id = label_root_id + roots_start; if (v_tail_global <= label_global_id) { // v_tail_global has higher rank than the label continue; } VertexID v_tail_local = G.get_local_vertex_id(v_tail_global); const IndexType &L_tail = L[v_tail_local]; ShortIndex &SI_v_tail = short_index[v_tail_local]; if (SI_v_tail.indicator[label_root_id]) { // The label is already selected before continue; } // Record label_root_id as once selected by v_tail_global SI_v_tail.indicator[label_root_id] = 1; // SI_v_tail.indicator.set(label_root_id); // Add into once_candidated_queue if (!once_candidated[v_tail_local]) { // If v_tail_global is not in the once_candidated_queue yet, add it in once_candidated[v_tail_local] = 1; once_candidated_queue[end_once_candidated_queue++] = v_tail_local; } // Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already // const IndexType &L_label = L[label_global_id]; // _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0); // _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0); const BPLabelType &L_label = bp_labels_table[label_root_id]; bool no_need_add = false; for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) { VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i]; if (td - 2 <= iter) { td += (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 : ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) | (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0])) ? -1 : 0; if (td <= iter) { no_need_add = true; break; } } } if (no_need_add) { continue; } if (SI_v_tail.is_candidate[label_root_id]) { continue; } SI_v_tail.is_candidate[label_root_id] = 1; SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id; if (!got_candidates[v_tail_local]) { // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate) got_candidates[v_tail_local] = 1; got_candidates_queue[end_got_candidates_queue++] = v_tail_local; } } } // { // assert(iter >= iter); // } } //// Function: pushes v_head's labels to v_head's every (master) neighbor //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //local_push_labels( // VertexID v_head_local, // VertexID roots_start, // const DistGraph &G, // std::vector<ShortIndex> &short_index, // std::vector<VertexID> &got_candidates_queue, // VertexID &end_got_candidates_queue, // std::vector<bool> &got_candidates, // std::vector<VertexID> &once_candidated_queue, // VertexID &end_once_candidated_queue, // std::vector<bool> &once_candidated, // const std::vector<BPLabelType> &bp_labels_table, // const std::vector<uint8_t> &used_bp_roots, // UnweightedDist iter) //{ // // The data structure of a message //// std::vector< LabelUnitType > buffer_recv; // const IndexType &Lv = L[v_head_local]; // // These 2 index are used for traversing v_head's last inserted labels // VertexID l_i_start = Lv.distances.rbegin() -> start_index; // VertexID l_i_bound = l_i_start + Lv.distances.rbegin() -> size; // // Traverse v_head's every neighbor v_tail // VertexID v_head_global = G.get_global_vertex_id(v_head_local); // EdgeID e_i_start = G.vertices_idx[v_head_global]; // EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global]; // for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) { // VertexID v_tail_global = G.out_edges[e_i]; // if (used_bp_roots[v_tail_global]) { // continue; // } // if (v_tail_global < 
roots_start) { // v_tail_global has higher rank than any roots, then no roots can push new labels to it. // return; // } // // // Traverse v_head's last inserted labels // for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) { // VertexID label_root_id = Lv.vertices[l_i]; // VertexID label_global_id = label_root_id + roots_start; // if (v_tail_global <= label_global_id) { // // v_tail_global has higher rank than the label // continue; // } // VertexID v_tail_local = G.get_local_vertex_id(v_tail_global); // const IndexType &L_tail = L[v_tail_local]; // ShortIndex &SI_v_tail = short_index[v_tail_local]; // if (SI_v_tail.indicator[label_root_id]) { // // The label is already selected before // continue; // } // // Record label_root_id as once selected by v_tail_global // SI_v_tail.indicator.set(label_root_id); // // Add into once_candidated_queue // // if (!once_candidated[v_tail_local]) { // // If v_tail_global is not in the once_candidated_queue yet, add it in // once_candidated[v_tail_local] = true; // once_candidated_queue[end_once_candidated_queue++] = v_tail_local; // } // // // Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already // // ++total_check_count; //// const IndexType &L_label = L[label_global_id]; //// _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0); //// _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0); //// bp_checking_ins_count.measure_start(); // const BPLabelType &L_label = bp_labels_table[label_root_id]; // bool no_need_add = false; // for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) { // VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i]; // if (td - 2 <= iter) { // td += // (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 : // ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) | // (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0])) // ? -1 : 0; // if (td <= iter) { // no_need_add = true; //// ++bp_hit_count; // break; // } // } // } // if (no_need_add) { //// bp_checking_ins_count.measure_stop(); // continue; // } //// bp_checking_ins_count.measure_stop(); // if (SI_v_tail.is_candidate[label_root_id]) { // continue; // } // SI_v_tail.is_candidate[label_root_id] = true; // SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id; // // if (!got_candidates[v_tail_local]) { // // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate) // got_candidates[v_tail_local] = true; // got_candidates_queue[end_got_candidates_queue++] = v_tail_local; // } // } // } // // { // assert(iter >= iter); // } //} //// DEPRECATED Function: in the scatter phase, synchronize local masters to mirrors on other hosts //// Has some mysterious problem: when I call this function, some hosts will receive wrong messages; when I copy all //// code of this function into the caller, all messages become right. 
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::
//sync_masters_2_mirrors(
//        const DistGraph &G,
//        const std::vector<VertexID> &active_queue,
//        VertexID end_active_queue,
//        std::vector< std::pair<VertexID, VertexID> > &buffer_send,
//        std::vector<MPI_Request> &requests_send
//)
//{
////    std::vector< std::pair<VertexID, VertexID> > buffer_send;
//        // pair.first: owner vertex ID of the label
//        // pair.second: label vertex ID of the label
//    // Prepare masters' newly added labels for sending
//    for (VertexID i_q = 0; i_q < end_active_queue; ++i_q) {
//        VertexID v_head_local = active_queue[i_q];
//        VertexID v_head_global = G.get_global_vertex_id(v_head_local);
//        const IndexType &Lv = L[v_head_local];
//        // These 2 indices are used for traversing v_head's last inserted labels
//        VertexID l_i_start = Lv.distances.rbegin()->start_index;
//        VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size;
//        for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) {
//            VertexID label_root_id = Lv.vertices[l_i];
//            buffer_send.emplace_back(v_head_global, label_root_id);
////            {//test
////                if (1 == host_id) {
////                    printf("@%u host_id: %u v_head_global: %u\n", __LINE__, host_id, v_head_global);//
////                }
////            }
//        }
//    }
//    {
//        if (!buffer_send.empty()) {
//            printf("@%u host_id: %u sync_masters_2_mirrors: buffer_send.size: %lu buffer_send[0]:(%u %u)\n", __LINE__, host_id, buffer_send.size(), buffer_send[0].first, buffer_send[0].second);
//        }
//        assert(!requests_send.empty());
//    }
//
//    // Send messages
//    for (int loc = 0; loc < num_hosts - 1; ++loc) {
//        int dest_host_id = G.buffer_send_list_loc_2_master_host_id(loc);
//        MPI_Isend(buffer_send.data(),
//                MPI_Instance::get_sending_size(buffer_send),
//                MPI_CHAR,
//                dest_host_id,
//                SENDING_MASTERS_TO_MIRRORS,
//                MPI_COMM_WORLD,
//                &requests_send[loc]);
//        {
//            if (!buffer_send.empty()) {
//                printf("@%u host_id: %u dest_host_id: %u buffer_send.size: %lu buffer_send[0]:(%u %u)\n", __LINE__, host_id, dest_host_id, buffer_send.size(), buffer_send[0].first, buffer_send[0].second);
//            }
//        }
//    }
//}

// Function for distance query;
// traverse vertex v_id's labels;
// return false if a shorter distance already exists; return true if cand_root_id can be added into v_id's labels.
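// The pruning relies on dist_table[cand_root_id][v] caching the current-batch distance
// from root cand_root_id to every higher-ranked hub v: if some hub v of v_id satisfies
// dist(v_id, v) + dist_table[cand_root_id][v] <= iter, then a path no longer than the
// candidate label's distance already exists, so the candidate can be discarded.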
template <VertexID BATCH_SIZE>
inline bool DistBVCPLL<BATCH_SIZE>::
distance_query(
        VertexID cand_root_id,
        VertexID v_id_local,
        VertexID roots_start,
//        const std::vector<IndexType> &L,
        const std::vector< std::vector<UnweightedDist> > &dist_table,
        UnweightedDist iter)
{
    VertexID cand_real_id = cand_root_id + roots_start;
    const IndexType &Lv = L[v_id_local];

    // Traverse all of v_id's existing labels
//    VertexID b_i_bound = Lv.batches.size();
//    _mm_prefetch(&Lv.batches[0], _MM_HINT_T0);
    _mm_prefetch(&Lv.distances[0], _MM_HINT_T0);
    _mm_prefetch(&Lv.vertices[0], _MM_HINT_T0);
    //_mm_prefetch(&dist_table[cand_root_id][0], _MM_HINT_T0);
//    for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
//        VertexID id_offset = Lv.batches[b_i].batch_id * BATCH_SIZE;
//        VertexID dist_start_index = Lv.batches[b_i].start_index;
//        VertexID dist_bound_index = dist_start_index + Lv.batches[b_i].size;
//        // Traverse dist_table
//        for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) {
    VertexID dist_bound_index = Lv.distances.size();
    for (VertexID dist_i = 0; dist_i < dist_bound_index; ++dist_i) {
        UnweightedDist dist = Lv.distances[dist_i].dist;
        // Cannot use this early break: without batch_id, distances are no longer ordered across batches.
//        if (dist >= iter) { // In a batch, the labels' distances are increasingly ordered.
//            // If the half-path distance is already greater than the targeted distance, jump to the next batch
//            break;
//        }
        VertexID v_start_index = Lv.distances[dist_i].start_index;
        VertexID v_bound_index = v_start_index + Lv.distances[dist_i].size;
//        _mm_prefetch(&dist_table[cand_root_id][0], _MM_HINT_T0);
        _mm_prefetch(reinterpret_cast<const char *>(dist_table[cand_root_id].data()), _MM_HINT_T0);
        for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) {
//            VertexID v = Lv.vertices[v_i] + id_offset; // v is a label hub of v_id
            VertexID v = Lv.vertices[v_i]; // v is a label hub of v_id
            if (v >= cand_real_id) {
                // Vertex cand_real_id cannot have labels whose ranks are lower than its own,
                // in which case dist_table[cand_root_id][v] does not exist.
                continue;
            }
            VertexID d_tmp = dist + dist_table[cand_root_id][v];
            if (d_tmp <= iter) {
                return false;
            }
        }
    }
//    }
    return true;
}

//// Sequential version
// Function inserts candidate cand_root_id into vertex v_id's labels
// and records the update for the distance buffer dist_table;
// it only updates the vertices array of v_id's labels,
// the other index arrays are updated later by update_label_indices().
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
insert_label_only_seq(
        VertexID cand_root_id,
        VertexID v_id_local,
        VertexID roots_start,
        VertexID roots_size,
        const DistGraph &G,
//        std::vector< std::vector<UnweightedDist> > &dist_table,
        std::vector< std::pair<VertexID, VertexID> > &buffer_send)
//        UnweightedDist iter)
{
    VertexID cand_real_id = cand_root_id + roots_start;
    L[v_id_local].vertices.push_back(cand_real_id);
//    L[v_id_local].vertices.push_back(cand_root_id);
    // Update the distance buffer if v_id is a root
    VertexID v_id_global = G.get_global_vertex_id(v_id_local);
    VertexID v_root_id = v_id_global - roots_start;
    if (v_id_global >= roots_start && v_root_id < roots_size) {
//        VertexID cand_real_id = cand_root_id + roots_start;
//        dist_table[v_root_id][cand_real_id] = iter;
        // Put the update into the buffer_send for later sending
        buffer_send.emplace_back(v_root_id, cand_real_id);
    }
}

//// Parallel Version
// Function inserts candidate cand_root_id into vertex v_id's labels
// and records the update for the distance buffer dist_table;
// it only updates the vertices array of v_id's labels,
// the other index arrays are updated later by update_label_indices().
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
insert_label_only_para(
        VertexID cand_root_id,
        VertexID v_id_local,
        VertexID roots_start,
        VertexID roots_size,
        const DistGraph &G,
//        std::vector< std::pair<VertexID, VertexID> > &buffer_send)
        std::vector< std::pair<VertexID, VertexID> > &tmp_buffer_send,
        EdgeID &size_tmp_buffer_send,
        const EdgeID offset_tmp_buffer_send)
{
    VertexID cand_real_id = cand_root_id + roots_start;
    L[v_id_local].vertices.push_back(cand_real_id);
//    L[v_id_local].vertices.push_back(cand_root_id);
    // Update the distance buffer if v_id is a root
    VertexID v_id_global = G.get_global_vertex_id(v_id_local);
    VertexID v_root_id = v_id_global - roots_start;
    if (v_id_global >= roots_start && v_root_id < roots_size) {
//        VertexID cand_real_id = cand_root_id + roots_start;
        // Put the update into the buffer_send for later sending
        tmp_buffer_send[offset_tmp_buffer_send + size_tmp_buffer_send++] = std::make_pair(v_root_id, cand_real_id);
    }
}

// Function updates the index arrays in v_id's label only if new labels have been inserted for v_id
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
update_label_indices(
        const VertexID v_id_local,
        const VertexID inserted_count,
//        std::vector<IndexType> &L,
//        std::vector<ShortIndex> &short_index,
//        VertexID b_id,
        const UnweightedDist iter)
{
    IndexType &Lv = L[v_id_local];
//    // indicator[BATCH_SIZE + 1] is true, means v got some labels already in this batch
//    if (short_index[v_id_local].indicator[BATCH_SIZE]) {
//        // Increase the batches' last element's size because a new distance element needs to be added
//        ++(Lv.batches.rbegin() -> size);
//    } else {
//        short_index[v_id_local].indicator[BATCH_SIZE] = 1;
////        short_index[v_id_local].indicator.set(BATCH_SIZE);
//        // Insert a new Batch with batch_id, start_index, and size because a new distance element needs to be added
//        Lv.batches.emplace_back(
//                b_id, // batch id
//                Lv.distances.size(), // start index
//                1); // size
//    }
    // Insert a new distance element with start_index, size, and dist
    Lv.distances.emplace_back(
            Lv.vertices.size() - inserted_count, // start index
            inserted_count, // size
            iter); // distance
}

// Function resets the distance buffer dist_table to INF.
// Only the elements recorded during this batch are reset (instead of the whole table),
// which reduces the cost of initializing the next batch.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
reset_at_end(
//        const DistGraph &G,
//        VertexID roots_start,
//        const std::vector<VertexID> &roots_master_local,
        std::vector< std::vector<UnweightedDist> > &dist_table,
        std::vector< std::vector<VertexID> > &recved_dist_table,
        std::vector<BPLabelType> &bp_labels_table)
{
//    // Reset dist_table according to local masters' labels
//    for (VertexID r_local_id : roots_master_local) {
//        IndexType &Lr = L[r_local_id];
//        VertexID r_root_id = G.get_global_vertex_id(r_local_id) - roots_start;
//        VertexID b_i_bound = Lr.batches.size();
//        _mm_prefetch(&Lr.batches[0], _MM_HINT_T0);
//        _mm_prefetch(&Lr.distances[0], _MM_HINT_T0);
//        _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0);
//        for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
//            VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE;
//            VertexID dist_start_index = Lr.batches[b_i].start_index;
//            VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size;
//            // Traverse dist_table
//            for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) {
//                VertexID v_start_index = Lr.distances[dist_i].start_index;
//                VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size;
//                for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) {
//                    dist_table[r_root_id][Lr.vertices[v_i] + id_offset] = MAX_UNWEIGHTED_DIST;
//                }
//            }
//        }
//    }
    // Reset dist_table according to received masters' labels from other hosts
    for (VertexID r_root_id = 0; r_root_id < BATCH_SIZE; ++r_root_id) {
        for (VertexID cand_real_id : recved_dist_table[r_root_id]) {
            dist_table[r_root_id][cand_real_id] = MAX_UNWEIGHTED_DIST;
        }
        recved_dist_table[r_root_id].clear();
    }
    // Reset bit-parallel labels table
    for (VertexID r_root_id = 0; r_root_id < BATCH_SIZE; ++r_root_id) {
        memset(bp_labels_table[r_root_id].bp_dist, 0, sizeof(bp_labels_table[r_root_id].bp_dist));
        memset(bp_labels_table[r_root_id].bp_sets, 0, sizeof(bp_labels_table[r_root_id].bp_sets));
    }
}

template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
batch_process(
        const DistGraph &G,
//        const VertexID b_id,
        const VertexID roots_start, // start id of roots
        const VertexID roots_size, // how many roots in the batch
        const std::vector<uint8_t> &used_bp_roots,
        std::vector<VertexID> &active_queue,
        VertexID &end_active_queue,
        std::vector<VertexID> &got_candidates_queue,
        VertexID &end_got_candidates_queue,
        std::vector<ShortIndex> &short_index,
        std::vector< std::vector<UnweightedDist> > &dist_table,
        std::vector< std::vector<VertexID> > &recved_dist_table,
        std::vector<BPLabelType> &bp_labels_table,
        std::vector<uint8_t> &got_candidates,
//        std::vector<bool> &got_candidates,
        std::vector<uint8_t> &is_active,
//        std::vector<bool> &is_active,
        std::vector<VertexID> &once_candidated_queue,
        VertexID &end_once_candidated_queue,
        std::vector<uint8_t> &once_candidated)
//        std::vector<bool> &once_candidated)
{
    // At the beginning of a batch, initialize the labels L and distance buffer dist_table;
//    initializing_time -= WallTimer::get_time_mark();
    VertexID global_num_actives = initialization(G,
            short_index,
            dist_table,
            recved_dist_table,
            bp_labels_table,
            active_queue,
            end_active_queue,
            once_candidated_queue,
            end_once_candidated_queue,
            once_candidated,
//            b_id,
            roots_start,
roots_size, // roots_master_local, used_bp_roots); // initializing_time += WallTimer::get_time_mark(); UnweightedDist iter = 0; // The iterator, also the distance for current iteration // {//test // if (0 == host_id) { // printf("host_id: %u initialization finished.\n", host_id); // } // } while (global_num_actives) { ++iter; //#ifdef DEBUG_MESSAGES_ON // {//test //// if (0 == host_id) { // double memtotal = 0; // double memfree = 0; // PADO::Utils::system_memory(memtotal, memfree); // printf("iter: %u " // "host_id: %d " // "global_num_actives: %u " // "L.size(): %.2fGB " // "memtotal: %.2fGB " // "memfree: %.2fGB\n", // iter, // host_id, // global_num_actives, // get_index_size() * 1.0 / (1 << 30), // memtotal / 1024, // memfree / 1024); //// } // } //#endif // Traverse active vertices to push their labels as candidates // Send masters' newly added labels to other hosts { // scatter_time -= WallTimer::get_time_mark(); // Divide the pushing into many-time runs. const VertexID chunk_size = 1 << 24; VertexID remainder = global_num_actives % chunk_size; VertexID bound_global_i = global_num_actives - remainder; // VertexID remainder = end_active_queue % chunk_size; // VertexID bound_active_queue = end_active_queue - remainder; VertexID local_size; for (VertexID global_i = 0; global_i < bound_global_i; global_i += chunk_size) { if (global_i < end_active_queue) { local_size = end_active_queue - global_i; } else { local_size = 0; } schedule_label_pushing_para( G, roots_start, used_bp_roots, active_queue, global_i, chunk_size, local_size, got_candidates_queue, end_got_candidates_queue, short_index, bp_labels_table, got_candidates, is_active, once_candidated_queue, end_once_candidated_queue, once_candidated, iter); } if (remainder) { if (bound_global_i < end_active_queue) { local_size = end_active_queue - bound_global_i; } else { local_size = 0; } schedule_label_pushing_para( G, roots_start, used_bp_roots, active_queue, bound_global_i, remainder, local_size, got_candidates_queue, end_got_candidates_queue, short_index, bp_labels_table, got_candidates, is_active, once_candidated_queue, end_once_candidated_queue, once_candidated, iter); } // // schedule_label_pushing_para( // G, // roots_start, // used_bp_roots, // active_queue, // 0, // end_active_queue, // got_candidates_queue, // end_got_candidates_queue, // short_index, // bp_labels_table, // got_candidates, // is_active, // once_candidated_queue, // end_once_candidated_queue, // once_candidated, // iter); end_active_queue = 0; // scatter_time += WallTimer::get_time_mark(); } //// For Backup // { // scatter_time -= WallTimer::get_time_mark(); // std::vector<std::pair<VertexID, VertexID> > buffer_send_indices(end_active_queue); // //.first: Vertex ID // //.second: size of labels // std::vector<VertexID> buffer_send_labels; // // Prepare masters' newly added labels for sending // if (end_active_queue >= THRESHOLD_PARALLEL) { // // Parallel Version // // Prepare offset for inserting // std::vector<VertexID> offsets_buffer_locs(end_active_queue); //#pragma omp parallel for // for (VertexID i_q = 0; i_q < end_active_queue; ++i_q) { // VertexID v_head_local = active_queue[i_q]; // is_active[v_head_local] = 0; // reset is_active // const IndexType &Lv = L[v_head_local]; // offsets_buffer_locs[i_q] = Lv.distances.rbegin()->size; // } // EdgeID size_buffer_send_labels = PADO::prefix_sum_for_offsets(offsets_buffer_locs); // buffer_send_labels.resize(size_buffer_send_labels); //#pragma omp parallel for // for (VertexID i_q = 0; i_q < end_active_queue; 
++i_q) { // VertexID top_labels = 0; // VertexID v_head_local = active_queue[i_q]; // is_active[v_head_local] = 0; // reset is_active // VertexID v_head_global = G.get_global_vertex_id(v_head_local); // const IndexType &Lv = L[v_head_local]; // // Prepare the buffer_send_indices // buffer_send_indices[i_q] = std::make_pair(v_head_global, Lv.distances.rbegin()->size); // // These 2 index are used for traversing v_head's last inserted labels // VertexID l_i_start = Lv.distances.rbegin()->start_index; // VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size; // for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) { // VertexID label_root_id = Lv.vertices[l_i]; // buffer_send_labels[offsets_buffer_locs[i_q] + top_labels++] = label_root_id; //// buffer_send_labels.push_back(label_root_id); // } // } // } else { // // Sequential Version // for (VertexID i_q = 0; i_q < end_active_queue; ++i_q) { // VertexID v_head_local = active_queue[i_q]; // is_active[v_head_local] = 0; // reset is_active // VertexID v_head_global = G.get_global_vertex_id(v_head_local); // const IndexType &Lv = L[v_head_local]; // // Prepare the buffer_send_indices // buffer_send_indices[i_q] = std::make_pair(v_head_global, Lv.distances.rbegin()->size); // // These 2 index are used for traversing v_head's last inserted labels // VertexID l_i_start = Lv.distances.rbegin()->start_index; // VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size; // for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) { // VertexID label_root_id = Lv.vertices[l_i]; // buffer_send_labels.push_back(label_root_id); // } // } // } // end_active_queue = 0; // // for (int root = 0; root < num_hosts; ++root) { // // Get the indices // std::vector< std::pair<VertexID, VertexID> > indices_buffer; // one_host_bcasts_buffer_to_buffer(root, // buffer_send_indices, // indices_buffer); // if (indices_buffer.empty()) { // continue; // } // // Get the labels // std::vector<VertexID> labels_buffer; // one_host_bcasts_buffer_to_buffer(root, // buffer_send_labels, // labels_buffer); // // VertexID size_indices_buffer = indices_buffer.size(); // if (size_indices_buffer >= THRESHOLD_PARALLEL) { // // Prepare the offsets for reading indices_buffer // std::vector<EdgeID> starts_locs_index(size_indices_buffer); //#pragma omp parallel for // for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) { // const std::pair<VertexID, VertexID> &e = indices_buffer[i_i]; // starts_locs_index[i_i] = e.second; // } // EdgeID total_recved_labels = PADO::prefix_sum_for_offsets(starts_locs_index); // // // Prepare the offsets for inserting v_tails into queue // std::vector<VertexID> offsets_tmp_queue(size_indices_buffer); //#pragma omp parallel for // for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) { // const std::pair<VertexID, VertexID> &e = indices_buffer[i_i]; // offsets_tmp_queue[i_i] = G.local_out_degrees[e.first]; // } // EdgeID num_ngbrs = PADO::prefix_sum_for_offsets(offsets_tmp_queue); // std::vector<VertexID> tmp_got_candidates_queue(num_ngbrs); // std::vector<VertexID> sizes_tmp_got_candidates_queue(size_indices_buffer, 0); // std::vector<VertexID> tmp_once_candidated_queue(num_ngbrs); // std::vector<VertexID> sizes_tmp_once_candidated_queue(size_indices_buffer, 0); //#pragma omp parallel for // for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) { // VertexID v_head_global = indices_buffer[i_i].first; // EdgeID start_index = starts_locs_index[i_i]; // EdgeID bound_index = i_i != size_indices_buffer - 1 ? 
// starts_locs_index[i_i + 1] : total_recved_labels; // if (G.local_out_degrees[v_head_global]) { // local_push_labels_para( // v_head_global, // start_index, // bound_index, // roots_start, // labels_buffer, // G, // short_index, // // std::vector<VertexID> &got_candidates_queue, // // VertexID &end_got_candidates_queue, // tmp_got_candidates_queue, // sizes_tmp_got_candidates_queue[i_i], // offsets_tmp_queue[i_i], // got_candidates, // // std::vector<VertexID> &once_candidated_queue, // // VertexID &end_once_candidated_queue, // tmp_once_candidated_queue, // sizes_tmp_once_candidated_queue[i_i], // once_candidated, // bp_labels_table, // used_bp_roots, // iter); // } // } // // {// Collect elements from tmp_got_candidates_queue to got_candidates_queue // VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_got_candidates_queue); // PADO::collect_into_queue( // tmp_got_candidates_queue, // offsets_tmp_queue, // the locations for reading tmp_got_candidate_queue // sizes_tmp_got_candidates_queue, // the locations for writing got_candidate_queue // total_new, // got_candidates_queue, // end_got_candidates_queue); // } // {// Collect elements from tmp_once_candidated_queue to once_candidated_queue // VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_once_candidated_queue); // PADO::collect_into_queue( // tmp_once_candidated_queue, // offsets_tmp_queue, // the locations for reading tmp_once_candidats_queue // sizes_tmp_once_candidated_queue, // the locations for writing once_candidated_queue // total_new, // once_candidated_queue, // end_once_candidated_queue); // } // } else { // // Sequential Version // // Push those labels // EdgeID start_index = 0; // for (const std::pair<VertexID, VertexID> &e : indices_buffer) { // VertexID v_head_global = e.first; // EdgeID bound_index = start_index + e.second; // if (G.local_out_degrees[v_head_global]) { // local_push_labels_seq( // v_head_global, // start_index, // bound_index, // roots_start, // labels_buffer, // G, // short_index, // got_candidates_queue, // end_got_candidates_queue, // got_candidates, // once_candidated_queue, // end_once_candidated_queue, // once_candidated, // bp_labels_table, // used_bp_roots, // iter); // } // start_index = bound_index; // } // } // } // scatter_time += WallTimer::get_time_mark(); // } // {//test // if (0 == host_id) { // printf("iter: %u pushing labels finished.\n", iter); // } // } // Traverse vertices in the got_candidates_queue to insert labels { // gather_time -= WallTimer::get_time_mark(); std::vector< std::pair<VertexID, VertexID> > buffer_send; // For sync elements in the dist_table // pair.first: root id // pair.second: label (global) id of the root // if (true) { if (end_got_candidates_queue >= THRESHOLD_PARALLEL) { // Prepare for parallel active_queue // Don't need offsets_tmp_active_queue here, because the index i_queue is the offset already. // Actually we still need offsets_tmp_active_queue, because collect_into_queue() needs it. std::vector<VertexID> offsets_tmp_active_queue(end_got_candidates_queue); #pragma omp parallel for for (VertexID i_q = 0; i_q < end_got_candidates_queue; ++i_q) { offsets_tmp_active_queue[i_q] = i_q; } std::vector<VertexID> tmp_active_queue(end_got_candidates_queue); std::vector<VertexID> sizes_tmp_active_queue(end_got_candidates_queue, 0); // Size will only be 0 or 1, but it will become offsets eventually. 
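            // The rest of this parallel branch follows a count / prefix-sum / scatter-write / compact recipe:
            // per-vertex contribution counts are turned into write offsets by PADO::prefix_sum_for_offsets,
            // each iteration then writes into its own disjoint slice of a tmp buffer, and
            // PADO::collect_into_queue finally compacts the slices into the shared array.
            // No locks are needed because the slices never overlap.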
// Prepare for parallel buffer_send std::vector<EdgeID> offsets_tmp_buffer_send(end_got_candidates_queue); #pragma omp parallel for for (VertexID i_q = 0; i_q < end_got_candidates_queue; ++i_q) { VertexID v_id_local = got_candidates_queue[i_q]; VertexID v_global_id = G.get_global_vertex_id(v_id_local); if (v_global_id >= roots_start && v_global_id < roots_start + roots_size) { // If v_global_id is root, its new labels should be put into buffer_send offsets_tmp_buffer_send[i_q] = short_index[v_id_local].end_candidates_que; } else { offsets_tmp_buffer_send[i_q] = 0; } } EdgeID total_send_labels = PADO::prefix_sum_for_offsets(offsets_tmp_buffer_send); // {// test // if (0 == host_id) { // double memtotal = 0; // double memfree = 0; // double bytes_buffer_send = total_send_labels * sizeof(VertexID); // PADO::Utils::system_memory(memtotal, memfree); // printf("bytes_tmp_buffer_send: %fGB memtotal: %fGB memfree: %fGB\n", // bytes_buffer_send / (1 << 30), memtotal / 1024, memfree / 1024); // } // } std::vector< std::pair<VertexID, VertexID> > tmp_buffer_send(total_send_labels); // {// test // if (0 == host_id) { // printf("tmp_buffer_send created.\n"); // } // } std::vector<EdgeID> sizes_tmp_buffer_send(end_got_candidates_queue, 0); #pragma omp parallel for for (VertexID i_queue = 0; i_queue < end_got_candidates_queue; ++i_queue) { VertexID v_id_local = got_candidates_queue[i_queue]; VertexID inserted_count = 0; //recording number of v_id's truly inserted candidates got_candidates[v_id_local] = 0; // reset got_candidates // Traverse v_id's all candidates VertexID bound_cand_i = short_index[v_id_local].end_candidates_que; for (VertexID cand_i = 0; cand_i < bound_cand_i; ++cand_i) { VertexID cand_root_id = short_index[v_id_local].candidates_que[cand_i]; short_index[v_id_local].is_candidate[cand_root_id] = 0; // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance if (distance_query( cand_root_id, v_id_local, roots_start, // L, dist_table, iter)) { if (!is_active[v_id_local]) { is_active[v_id_local] = 1; // active_queue[end_active_queue++] = v_id_local; tmp_active_queue[i_queue + sizes_tmp_active_queue[i_queue]++] = v_id_local; } ++inserted_count; // The candidate cand_root_id needs to be added into v_id's label insert_label_only_para( cand_root_id, v_id_local, roots_start, roots_size, G, tmp_buffer_send, sizes_tmp_buffer_send[i_queue], offsets_tmp_buffer_send[i_queue]); // buffer_send); } } short_index[v_id_local].end_candidates_que = 0; if (0 != inserted_count) { // Update other arrays in L[v_id] if new labels were inserted in this iteration update_label_indices( v_id_local, inserted_count, // L, // short_index, // b_id, iter); } } {// Collect elements from tmp_active_queue to active_queue VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_active_queue); PADO::collect_into_queue( tmp_active_queue, offsets_tmp_active_queue, sizes_tmp_active_queue, total_new, active_queue, end_active_queue); } {// Collect elements from tmp_buffer_send to buffer_send EdgeID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_buffer_send); // {// test // if (0 == host_id) { // double memtotal = 0; // double memfree = 0; // double bytes_buffer_send = total_new * sizeof(VertexID); // PADO::Utils::system_memory(memtotal, memfree); // printf("bytes_buffer_send: %fGB memtotal: %fGB memfree: %fGB\n", // bytes_buffer_send / (1 << 30), memtotal / 1024, memfree / 1024); // } // } buffer_send.resize(total_new); // {// test // if (0 == host_id) { // printf("buffer_send 
created.\n"); // } // } EdgeID zero_size = 0; PADO::collect_into_queue( tmp_buffer_send, offsets_tmp_buffer_send, sizes_tmp_buffer_send, total_new, buffer_send, zero_size); // {//test // if (iter == 6) { // for (VertexID i_b = 0; i_b < total_new; ++i_b) { // const auto &e = buffer_send[i_b]; // VertexID root_id = e.first; // VertexID cand_real_id = e.second; // if (root_id > 1024) { // printf("total_new: %lu " // "buffer_send[%u]: " // "root_id: %u " // "cand_real_id: %u\n", // total_new, // i_b, // root_id, // cand_real_id); // exit(1); // } // } // } // } } } else { for (VertexID i_queue = 0; i_queue < end_got_candidates_queue; ++i_queue) { VertexID v_id_local = got_candidates_queue[i_queue]; VertexID inserted_count = 0; //recording number of v_id's truly inserted candidates got_candidates[v_id_local] = 0; // reset got_candidates // Traverse v_id's all candidates VertexID bound_cand_i = short_index[v_id_local].end_candidates_que; for (VertexID cand_i = 0; cand_i < bound_cand_i; ++cand_i) { VertexID cand_root_id = short_index[v_id_local].candidates_que[cand_i]; short_index[v_id_local].is_candidate[cand_root_id] = 0; // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance if (distance_query( cand_root_id, v_id_local, roots_start, // L, dist_table, iter)) { if (!is_active[v_id_local]) { is_active[v_id_local] = 1; active_queue[end_active_queue++] = v_id_local; } ++inserted_count; // The candidate cand_root_id needs to be added into v_id's label insert_label_only_seq( cand_root_id, v_id_local, roots_start, roots_size, G, // dist_table, buffer_send); // iter); } } short_index[v_id_local].end_candidates_que = 0; if (0 != inserted_count) { // Update other arrays in L[v_id] if new labels were inserted in this iteration update_label_indices( v_id_local, inserted_count, // L, // short_index, // b_id, iter); } } } // {//test // printf("host_id: %u gather: buffer_send.size(); %lu bytes: %lu\n", host_id, buffer_send.size(), MPI_Instance::get_sending_size(buffer_send)); // } end_got_candidates_queue = 0; // Set the got_candidates_queue empty // Sync the dist_table for (int root = 0; root < num_hosts; ++root) { std::vector<std::pair<VertexID, VertexID>> buffer_recv; one_host_bcasts_buffer_to_buffer(root, buffer_send, buffer_recv); if (buffer_recv.empty()) { continue; } EdgeID size_buffer_recv = buffer_recv.size(); if (size_buffer_recv >= THRESHOLD_PARALLEL) { // Get label number for every root std::vector<VertexID> sizes_recved_root_labels(roots_size, 0); #pragma omp parallel for for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) { const std::pair<VertexID, VertexID> &e = buffer_recv[i_l]; VertexID root_id = e.first; __atomic_add_fetch(sizes_recved_root_labels.data() + root_id, 1, __ATOMIC_SEQ_CST); } // Resize the recved_dist_table for every root #pragma omp parallel for for (VertexID root_id = 0; root_id < roots_size; ++root_id) { VertexID old_size = recved_dist_table[root_id].size(); VertexID tmp_size = sizes_recved_root_labels[root_id]; if (tmp_size) { recved_dist_table[root_id].resize(old_size + tmp_size); sizes_recved_root_labels[root_id] = old_size; // sizes_recved_root_labels now records old_size } // If tmp_size == 0, root_id has no received labels. 
//                        sizes_recved_root_labels[root_id] = old_size; // sizes_recved_root_labels now records old_size
                    }
                    // Record received labels in recved_dist_table
#pragma omp parallel for
                    for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) {
                        const std::pair<VertexID, VertexID> &e = buffer_recv[i_l];
                        VertexID root_id = e.first;
                        VertexID cand_real_id = e.second;
                        dist_table[root_id][cand_real_id] = iter;
                        PADO::TS_enqueue(recved_dist_table[root_id], sizes_recved_root_labels[root_id], cand_real_id);
                    }
                } else {
                    for (const std::pair<VertexID, VertexID> &e : buffer_recv) {
                        VertexID root_id = e.first;
                        VertexID cand_real_id = e.second;
                        dist_table[root_id][cand_real_id] = iter;
                        // Record the received element, for future reset
                        recved_dist_table[root_id].push_back(cand_real_id);
                    }
                }
            }

            // Sync the global_num_actives
            MPI_Allreduce(&end_active_queue,
                    &global_num_actives,
                    1,
                    V_ID_Type,
                    MPI_MAX, // MPI_SUM also works; MPI_MAX suffices because global_num_actives is only tested for being nonzero.
                    MPI_COMM_WORLD);
//            gather_time += WallTimer::get_time_mark();
        }
//        {//test
//            if (0 == host_id) {
//                printf("iter: %u inserting labels finished.\n", iter);
//            }
//        }
    }

    // Reset the dist_table
//    clearup_time -= WallTimer::get_time_mark();
    reset_at_end(
//            G,
//            roots_start,
//            roots_master_local,
            dist_table,
            recved_dist_table,
            bp_labels_table);
//    clearup_time += WallTimer::get_time_mark();
//    {//test
//        if (0 == host_id) {
//            printf("host_id: %u resetting finished.\n", host_id);
//        }
//    }
}

//// Sequential Version
//template <VertexID BATCH_SIZE>
//inline void DistBVCPLL<BATCH_SIZE>::
//batch_process(
//        const DistGraph &G,
//        VertexID b_id,
//        VertexID roots_start, // start id of roots
//        VertexID roots_size, // how many roots in the batch
//        const std::vector<uint8_t> &used_bp_roots,
//        std::vector<VertexID> &active_queue,
//        VertexID &end_active_queue,
//        std::vector<VertexID> &got_candidates_queue,
//        VertexID &end_got_candidates_queue,
//        std::vector<ShortIndex> &short_index,
//        std::vector< std::vector<UnweightedDist> > &dist_table,
//        std::vector< std::vector<VertexID> > &recved_dist_table,
//        std::vector<BPLabelType> &bp_labels_table,
//        std::vector<uint8_t> &got_candidates,
////        std::vector<bool> &got_candidates,
//        std::vector<uint8_t> &is_active,
////        std::vector<bool> &is_active,
//        std::vector<VertexID> &once_candidated_queue,
//        VertexID &end_once_candidated_queue,
//        std::vector<uint8_t> &once_candidated)
////        std::vector<bool> &once_candidated)
//{
//    // At the beginning of a batch, initialize the labels L and distance buffer dist_table;
//    initializing_time -= WallTimer::get_time_mark();
//    VertexID global_num_actives = initialization(G,
//            short_index,
//            dist_table,
//            recved_dist_table,
//            bp_labels_table,
//            active_queue,
//            end_active_queue,
//            once_candidated_queue,
//            end_once_candidated_queue,
//            once_candidated,
//            b_id,
//            roots_start,
//            roots_size,
////            roots_master_local,
//            used_bp_roots);
//    initializing_time += WallTimer::get_time_mark();
//    UnweightedDist iter = 0; // The iterator, also the distance for current iteration
////    {//test
////        printf("host_id: %u initialization finished.\n", host_id);
////    }
//
//
//    while (global_num_actives) {
////#ifdef DEBUG_MESSAGES_ON
////        {//
////            if (0 == host_id) {
////                printf("iter: %u global_num_actives: %u\n", iter, global_num_actives);
////            }
////        }
////#endif
//        ++iter;
//        // Traverse active vertices to push their labels as candidates
//        // Send masters' newly added labels to other hosts
//        {
//            scatter_time -= WallTimer::get_time_mark();
//            std::vector<std::pair<VertexID, VertexID> > buffer_send_indices(end_active_queue);
//                //.first:
Vertex ID // //.second: size of labels // std::vector<VertexID> buffer_send_labels; // // Prepare masters' newly added labels for sending // for (VertexID i_q = 0; i_q < end_active_queue; ++i_q) { // VertexID v_head_local = active_queue[i_q]; // is_active[v_head_local] = 0; // reset is_active // VertexID v_head_global = G.get_global_vertex_id(v_head_local); // const IndexType &Lv = L[v_head_local]; // // Prepare the buffer_send_indices // buffer_send_indices[i_q] = std::make_pair(v_head_global, Lv.distances.rbegin()->size); // // These 2 index are used for traversing v_head's last inserted labels // VertexID l_i_start = Lv.distances.rbegin()->start_index; // VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size; // for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) { // VertexID label_root_id = Lv.vertices[l_i]; // buffer_send_labels.push_back(label_root_id); // } // } // end_active_queue = 0; // // for (int root = 0; root < num_hosts; ++root) { // // Get the indices // std::vector< std::pair<VertexID, VertexID> > indices_buffer; // one_host_bcasts_buffer_to_buffer(root, // buffer_send_indices, // indices_buffer); // if (indices_buffer.empty()) { // continue; // } // // Get the labels // std::vector<VertexID> labels_buffer; // one_host_bcasts_buffer_to_buffer(root, // buffer_send_labels, // labels_buffer); // // Push those labels // EdgeID start_index = 0; // for (const std::pair<VertexID, VertexID> e : indices_buffer) { // VertexID v_head_global = e.first; // EdgeID bound_index = start_index + e.second; // if (G.local_out_degrees[v_head_global]) { // local_push_labels( // v_head_global, // start_index, // bound_index, // roots_start, // labels_buffer, // G, // short_index, // got_candidates_queue, // end_got_candidates_queue, // got_candidates, // once_candidated_queue, // end_once_candidated_queue, // once_candidated, // bp_labels_table, // used_bp_roots, // iter); // } // start_index = bound_index; // } // } // scatter_time += WallTimer::get_time_mark(); // } // // // Traverse vertices in the got_candidates_queue to insert labels // { // gather_time -= WallTimer::get_time_mark(); // std::vector< std::pair<VertexID, VertexID> > buffer_send; // For sync elements in the dist_table // // pair.first: root id // // pair.second: label (global) id of the root // for (VertexID i_queue = 0; i_queue < end_got_candidates_queue; ++i_queue) { // VertexID v_id_local = got_candidates_queue[i_queue]; // VertexID inserted_count = 0; //recording number of v_id's truly inserted candidates // got_candidates[v_id_local] = 0; // reset got_candidates // // Traverse v_id's all candidates // VertexID bound_cand_i = short_index[v_id_local].end_candidates_que; // for (VertexID cand_i = 0; cand_i < bound_cand_i; ++cand_i) { // VertexID cand_root_id = short_index[v_id_local].candidates_que[cand_i]; // short_index[v_id_local].is_candidate[cand_root_id] = 0; // // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance // if ( distance_query( // cand_root_id, // v_id_local, // roots_start, // // L, // dist_table, // iter) ) { // if (!is_active[v_id_local]) { // is_active[v_id_local] = 1; // active_queue[end_active_queue++] = v_id_local; // } // ++inserted_count; // // The candidate cand_root_id needs to be added into v_id's label // insert_label_only( // cand_root_id, // v_id_local, // roots_start, // roots_size, // G, //// dist_table, // buffer_send); //// iter); // } // } // short_index[v_id_local].end_candidates_que = 0; // if (0 != inserted_count) { // // 
Update other arrays in L[v_id] if new labels were inserted in this iteration // update_label_indices( // v_id_local, // inserted_count, // // L, // short_index, // b_id, // iter); // } // } //// {//test //// printf("host_id: %u gather: buffer_send.size(); %lu bytes: %lu\n", host_id, buffer_send.size(), MPI_Instance::get_sending_size(buffer_send)); //// } // end_got_candidates_queue = 0; // Set the got_candidates_queue empty // // Sync the dist_table // for (int root = 0; root < num_hosts; ++root) { // std::vector<std::pair<VertexID, VertexID>> buffer_recv; // one_host_bcasts_buffer_to_buffer(root, // buffer_send, // buffer_recv); // if (buffer_recv.empty()) { // continue; // } // for (const std::pair<VertexID, VertexID> &e : buffer_recv) { // VertexID root_id = e.first; // VertexID cand_real_id = e.second; // dist_table[root_id][cand_real_id] = iter; // // Record the received element, for future reset // recved_dist_table[root_id].push_back(cand_real_id); // } // } // // // Sync the global_num_actives // MPI_Allreduce(&end_active_queue, // &global_num_actives, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // gather_time += WallTimer::get_time_mark(); // } // } // // // Reset the dist_table // clearup_time -= WallTimer::get_time_mark(); // reset_at_end( //// G, //// roots_start, //// roots_master_local, // dist_table, // recved_dist_table, // bp_labels_table); // clearup_time += WallTimer::get_time_mark(); //} //// Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer. //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //template <typename E_T, typename F> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //every_host_bcasts_buffer_and_proc( // std::vector<E_T> &buffer_send, // F &fun) //{ // // Every host h_i broadcast to others // for (int root = 0; root < num_hosts; ++root) { // std::vector<E_T> buffer_recv; // one_host_bcasts_buffer_to_buffer(root, // buffer_send, // buffer_recv); // if (buffer_recv.empty()) { // continue; // } //// uint64_t size_buffer_send = buffer_send.size(); //// // Sync the size_buffer_send. //// message_time -= WallTimer::get_time_mark(); //// MPI_Bcast(&size_buffer_send, //// 1, //// MPI_UINT64_T, //// root, //// MPI_COMM_WORLD); //// message_time += WallTimer::get_time_mark(); ////// {// test ////// printf("host_id: %u h_i: %u bcast_buffer_send.size(): %lu\n", host_id, h_i, size_buffer_send); ////// } //// if (!size_buffer_send) { //// continue; //// } //// message_time -= WallTimer::get_time_mark(); //// std::vector<E_T> buffer_recv(size_buffer_send); //// if (host_id == root) { //// buffer_recv.assign(buffer_send.begin(), buffer_send.end()); //// } //// uint64_t bytes_buffer_send = size_buffer_send * ETypeSize; //// if (bytes_buffer_send < static_cast<size_t>(INT_MAX)) { //// // Only need 1 broadcast //// //// MPI_Bcast(buffer_recv.data(), //// bytes_buffer_send, //// MPI_CHAR, //// root, //// MPI_COMM_WORLD); //// } else { //// const uint32_t num_unit_buffers = ((bytes_buffer_send - 1) / static_cast<size_t>(INT_MAX)) + 1; //// const uint64_t unit_buffer_size = ((size_buffer_send - 1) / num_unit_buffers) + 1; //// size_t offset = 0; //// for (uint64_t b_i = 0; b_i < num_unit_buffers; ++b_i) { ////// size_t offset = b_i * unit_buffer_size; //// size_t size_unit_buffer = b_i == num_unit_buffers - 1 //// ? 
size_buffer_send - offset //// : unit_buffer_size; //// MPI_Bcast(buffer_recv.data() + offset, //// size_unit_buffer * ETypeSize, //// MPI_CHAR, //// root, //// MPI_COMM_WORLD); //// offset += unit_buffer_size; //// } //// } //// message_time += WallTimer::get_time_mark(); // for (const E_T &e : buffer_recv) { // fun(e); // } // } //} //// Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer. //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //template <typename E_T, typename F> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //every_host_bcasts_buffer_and_proc( // std::vector<E_T> &buffer_send, // F &fun) //{ // // Host processes locally. // for (const E_T &e : buffer_send) { // fun(e); // } // // // Every host sends to others // for (int src = 0; src < num_hosts; ++src) { // if (host_id == src) { // // Send from src // message_time -= WallTimer::get_time_mark(); // for (int hop = 1; hop < num_hosts; ++hop) { // int dst = hop_2_root_host_id(hop, host_id); // MPI_Instance::send_buffer_2_dst(buffer_send, // dst, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // } // message_time += WallTimer::get_time_mark(); // } else { // // Receive from src // for (int hop = 1; hop < num_hosts; ++hop) { // int dst = hop_2_root_host_id(hop, src); // if (host_id == dst) { // message_time -= WallTimer::get_time_mark(); // std::vector<E_T> buffer_recv; // MPI_Instance::recv_buffer_from_src(buffer_recv, // src, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // message_time += WallTimer::get_time_mark(); // // Process // for (const E_T &e : buffer_recv) { // fun(e); // } // } // } // } // } //} //// Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer. //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //template <typename E_T, typename F> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //every_host_bcasts_buffer_and_proc( // std::vector<E_T> &buffer_send, // F &fun) //{ // // Host processes locally. // for (const E_T &e : buffer_send) { // fun(e); // } // // Every host sends (num_hosts - 1) times // for (int hop = 1; hop < num_hosts; ++hop) { // int src = hop_2_me_host_id(-hop); // int dst = hop_2_me_host_id(hop); // if (src != dst) { // Normal case // // When host_id is odd, first receive, then send. // if (static_cast<uint32_t>(host_id) & 1U) { // message_time -= WallTimer::get_time_mark(); // // Receive first. // std::vector<E_T> buffer_recv; // MPI_Instance::recv_buffer_from_src(buffer_recv, // src, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // {//test // printf("host_id: %u recved_from: %u\n", host_id, src); // } // // Send then. // MPI_Instance::send_buffer_2_dst(buffer_send, // dst, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // {//test // printf("host_id: %u send_to: %u\n", host_id, dst); // } // message_time += WallTimer::get_time_mark(); // // Process // if (buffer_recv.empty()) { // continue; // } // for (const E_T &e : buffer_recv) { // fun(e); // } // } else { // When host_id is even, first send, then receive. // // Send first. // message_time -= WallTimer::get_time_mark(); // MPI_Instance::send_buffer_2_dst(buffer_send, // dst, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // {//test // printf("host_id: %u send_to: %u\n", host_id, dst); // } // // Receive then. 
// std::vector<E_T> buffer_recv; // MPI_Instance::recv_buffer_from_src(buffer_recv, // src, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // {//test // printf("host_id: %u recved_from: %u\n", host_id, src); // } // message_time += WallTimer::get_time_mark(); // // Process // if (buffer_recv.empty()) { // continue; // } // for (const E_T &e : buffer_recv) { // fun(e); // } // } // } else { // If host_id is higher than dst, first send, then receive // // This is a special case. It only happens when the num_hosts is even and hop equals to num_hosts/2. // if (host_id < dst) { // // Send // message_time -= WallTimer::get_time_mark(); // MPI_Instance::send_buffer_2_dst(buffer_send, // dst, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // // Receive // std::vector<E_T> buffer_recv; // MPI_Instance::recv_buffer_from_src(buffer_recv, // src, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // message_time += WallTimer::get_time_mark(); // // Process // if (buffer_recv.empty()) { // continue; // } // for (const E_T &e : buffer_recv) { // fun(e); // } // } else { // Otherwise, if host_id is lower than dst, first receive, then send // // Receive // message_time -= WallTimer::get_time_mark(); // std::vector<E_T> buffer_recv; // MPI_Instance::recv_buffer_from_src(buffer_recv, // src, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // // Send // MPI_Instance::send_buffer_2_dst(buffer_send, // dst, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // message_time += WallTimer::get_time_mark(); // // Process // if (buffer_recv.empty()) { // continue; // } // for (const E_T &e : buffer_recv) { // fun(e); // } // } // } // } //} //// DEPRECATED version Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer. //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //template <typename E_T, typename F> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //every_host_bcasts_buffer_and_proc( // std::vector<E_T> &buffer_send, // F &fun) //{ // const uint32_t UNIT_BUFFER_SIZE = 16U << 20U; // // Every host h_i broadcast to others // for (int h_i = 0; h_i < num_hosts; ++h_i) { // uint64_t size_buffer_send = buffer_send.size(); // // Sync the size_buffer_send. // message_time -= WallTimer::get_time_mark(); // MPI_Bcast(&size_buffer_send, // 1, // MPI_UINT64_T, // h_i, // MPI_COMM_WORLD); // message_time += WallTimer::get_time_mark(); //// {// test //// printf("host_id: %u h_i: %u bcast_buffer_send.size(): %lu\n", host_id, h_i, size_buffer_send); //// } // if (!size_buffer_send) { // continue; // } // uint32_t num_unit_buffers = (size_buffer_send + UNIT_BUFFER_SIZE - 1) / UNIT_BUFFER_SIZE; // // // Broadcast the buffer_send // for (uint32_t b_i = 0; b_i < num_unit_buffers; ++b_i) { // // Prepare the unit buffer // message_time -= WallTimer::get_time_mark(); // size_t offset = b_i * UNIT_BUFFER_SIZE; // size_t size_unit_buffer = b_i == num_unit_buffers - 1 // ? size_buffer_send - offset // : UNIT_BUFFER_SIZE; // std::vector<E_T> unit_buffer(size_unit_buffer); // // Copy the messages from buffer_send to unit buffer. 
// if (host_id == h_i) { // unit_buffer.assign(buffer_send.begin() + offset, buffer_send.begin() + offset + size_unit_buffer); // } // // Broadcast the unit buffer // MPI_Bcast(unit_buffer.data(), // MPI_Instance::get_sending_size(unit_buffer), // MPI_CHAR, // h_i, // MPI_COMM_WORLD); // message_time += WallTimer::get_time_mark(); // // Process every element of unit_buffer // for (const E_T &e : unit_buffer) { // fun(e); // } // } // } //} // Function: Host root broadcasts its sending buffer to a receiving buffer. template <VertexID BATCH_SIZE> template <typename E_T> inline void DistBVCPLL<BATCH_SIZE>:: one_host_bcasts_buffer_to_buffer( int root, std::vector<E_T> &buffer_send, std::vector<E_T> &buffer_recv) { const size_t ETypeSize = sizeof(E_T); uint64_t size_buffer_send = buffer_send.size(); // Sync the size_buffer_send. // message_time -= WallTimer::get_time_mark(); MPI_Bcast(&size_buffer_send, 1, MPI_UINT64_T, root, MPI_COMM_WORLD); // message_time += WallTimer::get_time_mark(); buffer_recv.resize(size_buffer_send); if (!size_buffer_send) { return; } // Broadcast the buffer_send // message_time -= WallTimer::get_time_mark(); if (host_id == root) { buffer_recv.assign(buffer_send.begin(), buffer_send.end()); } uint64_t bytes_buffer_send = size_buffer_send * ETypeSize; if (bytes_buffer_send <= static_cast<size_t>(INT_MAX)) { // Only need 1 broadcast MPI_Bcast(buffer_recv.data(), bytes_buffer_send, MPI_CHAR, root, MPI_COMM_WORLD); } else { const uint32_t num_unit_buffers = ((bytes_buffer_send - 1) / static_cast<size_t>(INT_MAX)) + 1; const uint64_t unit_buffer_size = ((size_buffer_send - 1) / num_unit_buffers) + 1; size_t offset = 0; for (uint64_t b_i = 0; b_i < num_unit_buffers; ++b_i) { size_t size_unit_buffer = b_i == num_unit_buffers - 1 ? size_buffer_send - offset : unit_buffer_size; MPI_Bcast(buffer_recv.data() + offset, size_unit_buffer * ETypeSize, MPI_CHAR, root, MPI_COMM_WORLD); offset += unit_buffer_size; } } // message_time += WallTimer::get_time_mark(); } } #endif //PADO_DPADO_H
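// A minimal, self-contained sketch of the chunked-broadcast technique used by
// one_host_bcasts_buffer_to_buffer above: MPI_Bcast takes an `int` element count,
// so a payload larger than INT_MAX bytes has to be split into units. The guard
// name and the helper below are illustrative only, assuming MPI has been initialized.
#ifdef PADO_CHUNKED_BCAST_SKETCH
#include <mpi.h>
#include <climits>
#include <cstdint>
#include <algorithm>
static void bcast_bytes(char *data, uint64_t bytes, int root)
{
    if (bytes <= static_cast<uint64_t>(INT_MAX)) {
        // The whole payload fits into a single MPI_Bcast call.
        MPI_Bcast(data, static_cast<int>(bytes), MPI_CHAR, root, MPI_COMM_WORLD);
        return;
    }
    // Split into the fewest units that each stay within the INT_MAX limit.
    const uint64_t num_units = (bytes - 1) / static_cast<uint64_t>(INT_MAX) + 1;
    const uint64_t unit_size = (bytes - 1) / num_units + 1;
    for (uint64_t offset = 0; offset < bytes; offset += unit_size) {
        const uint64_t size = std::min(unit_size, bytes - offset);
        // Every rank must issue the same sequence of collective calls.
        MPI_Bcast(data + offset, static_cast<int>(size), MPI_CHAR, root, MPI_COMM_WORLD);
    }
}
#endif // PADO_CHUNKED_BCAST_SKETCH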
elastic_kernel_3d_so8.c
#define _POSIX_C_SOURCE 200809L #include "stdlib.h" #include "math.h" #include "sys/time.h" #include "xmmintrin.h" #include "pmmintrin.h" #include <stdio.h> #include "omp.h" #define min(a, b) (((a) < (b)) ? (a) : (b)) #define max(a, b) (((a) > (b)) ? (a) : (b)) struct dataobj { void *restrict data; int *size; int *npsize; int *dsize; int *hsize; int *hofs; int *oofs; }; struct profiler { double section0; }; int Kernel(struct dataobj *restrict block_sizes_vec, const float h_x, const float h_y, const float h_z, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict save_src_fxx_vec, struct dataobj *restrict save_src_fyy_vec, struct dataobj *restrict save_src_fzz_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict tau_sol_xx_vec, struct dataobj *restrict tau_sol_xy_vec, struct dataobj *restrict tau_sol_xz_vec, struct dataobj *restrict tau_sol_yy_vec, struct dataobj *restrict tau_sol_yz_vec, struct dataobj *restrict tau_sol_zz_vec, struct dataobj *restrict v_sol_x_vec, struct dataobj *restrict v_sol_y_vec, struct dataobj *restrict v_sol_z_vec, const int sp_zi_m, const int time_M, const int time_m, struct profiler *timers, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int nthreads_nonaffine) { int(*restrict block_sizes) __attribute__((aligned(64))) = (int(*))block_sizes_vec->data; int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data; float(*restrict save_src_fxx)[save_src_fxx_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_fxx_vec->size[1]])save_src_fxx_vec->data; float(*restrict save_src_fyy)[save_src_fyy_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_fyy_vec->size[1]])save_src_fyy_vec->data; float(*restrict save_src_fzz)[save_src_fzz_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_fzz_vec->size[1]])save_src_fzz_vec->data; int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data; int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data; int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data; float(*restrict tau_sol_xx)[tau_sol_xx_vec->size[1]][tau_sol_xx_vec->size[2]][tau_sol_xx_vec->size[3]] __attribute__((aligned(64))) = (float(*)[tau_sol_xx_vec->size[1]][tau_sol_xx_vec->size[2]][tau_sol_xx_vec->size[3]])tau_sol_xx_vec->data; float(*restrict tau_sol_xy)[tau_sol_xy_vec->size[1]][tau_sol_xy_vec->size[2]][tau_sol_xy_vec->size[3]] __attribute__((aligned(64))) = (float(*)[tau_sol_xy_vec->size[1]][tau_sol_xy_vec->size[2]][tau_sol_xy_vec->size[3]])tau_sol_xy_vec->data; float(*restrict tau_sol_xz)[tau_sol_xz_vec->size[1]][tau_sol_xz_vec->size[2]][tau_sol_xz_vec->size[3]] __attribute__((aligned(64))) = (float(*)[tau_sol_xz_vec->size[1]][tau_sol_xz_vec->size[2]][tau_sol_xz_vec->size[3]])tau_sol_xz_vec->data; float(*restrict tau_sol_yy)[tau_sol_yy_vec->size[1]][tau_sol_yy_vec->size[2]][tau_sol_yy_vec->size[3]] __attribute__((aligned(64))) = 
(float(*)[tau_sol_yy_vec->size[1]][tau_sol_yy_vec->size[2]][tau_sol_yy_vec->size[3]])tau_sol_yy_vec->data; float(*restrict tau_sol_yz)[tau_sol_yz_vec->size[1]][tau_sol_yz_vec->size[2]][tau_sol_yz_vec->size[3]] __attribute__((aligned(64))) = (float(*)[tau_sol_yz_vec->size[1]][tau_sol_yz_vec->size[2]][tau_sol_yz_vec->size[3]])tau_sol_yz_vec->data; float(*restrict tau_sol_zz)[tau_sol_zz_vec->size[1]][tau_sol_zz_vec->size[2]][tau_sol_zz_vec->size[3]] __attribute__((aligned(64))) = (float(*)[tau_sol_zz_vec->size[1]][tau_sol_zz_vec->size[2]][tau_sol_zz_vec->size[3]])tau_sol_zz_vec->data; float(*restrict v_sol_x)[v_sol_x_vec->size[1]][v_sol_x_vec->size[2]][v_sol_x_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_sol_x_vec->size[1]][v_sol_x_vec->size[2]][v_sol_x_vec->size[3]])v_sol_x_vec->data; float(*restrict v_sol_y)[v_sol_y_vec->size[1]][v_sol_y_vec->size[2]][v_sol_y_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_sol_y_vec->size[1]][v_sol_y_vec->size[2]][v_sol_y_vec->size[3]])v_sol_y_vec->data; float(*restrict v_sol_z)[v_sol_z_vec->size[1]][v_sol_z_vec->size[2]][v_sol_z_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_sol_z_vec->size[1]][v_sol_z_vec->size[2]][v_sol_z_vec->size[3]])v_sol_z_vec->data; /* Flush denormal numbers to zero in hardware */ _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON); _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON); int xb_size = block_sizes[0]; int y0_blk0_size = block_sizes[3]; int x0_blk0_size = block_sizes[2]; int yb_size = block_sizes[1]; int sf = 8; int t_blk_size = 2 * sf * (time_M - time_m); //int xb_size = 64; //int yb_size = 64; //x0_blk0_size = 8; //y0_blk0_size = 8; printf(" Tiles: %d, %d ::: Blocks %d, %d \n", xb_size , yb_size , x0_blk0_size, y0_blk0_size); struct timeval start_section0, end_section0; gettimeofday(&start_section0, NULL); for (int t_blk = time_m; t_blk < sf * (time_M - time_m); t_blk += sf * t_blk_size) // for each t block { for (int xb = x_m; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size) { //printf(" Change of outer xblock %d \n", xb); for (int yb = y_m; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size) { for (int time = t_blk, t0 = (time) % (2), t1 = (time + 1) % (2); time <= 1 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t0 = (((time / sf) % (time_M - time_m + 1)) + 1) % (2), t1 = (((time / sf) % (time_M - time_m + 1))) % (2)) { int tw = ((time / sf) % (time_M - time_m + 1)); #pragma omp parallel num_threads(nthreads) { //printf(" Change of time block : %d \n", tw); #pragma omp for collapse(2) schedule(dynamic, 1) for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size) { for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size) { //printf(" Change of inner xblock %d \n", x0_blk0); for (int x = x0_blk0; x <= min(min((x_M + time), (xb + xb_size - 1)), (x0_blk0 + x0_blk0_size - 1)); x++) { for (int y = y0_blk0; y <= min(min((y_M + time), (yb + yb_size - 1)), (y0_blk0 + y0_blk0_size - 1)); y++) { //printf(" Updating velocity x %d \n", x - time + 4); //printf(" \n PDE update : \n"); #pragma omp simd aligned(tau_sol_xx, tau_sol_xz, tau_sol_zz, v_sol_x, v_sol_z : 32) for (int z = z_m; z <= z_M; z += 1) { //printf(" Updating velocity x %d z: %d \n", x - time + 4, z + 4); float r26 = 1.0 / h_z; float r25 = 1.0 / h_y; float r24 = 1.0 / h_x; v_sol_x[t1][x - time + 8][y - time + 8][z + 8] = r24 * (4.56702358488521e-4F * (tau_sol_xx[t0][x - time + 5][y - time + 8][z + 8] - 
tau_sol_xx[t0][x - time + 12][y - time + 8][z + 8]) + 6.2659563586471e-3F * (-tau_sol_xx[t0][x - time + 6][y - time + 8][z + 8] + tau_sol_xx[t0][x - time + 11][y - time + 8][z + 8]) + 5.22163029879319e-2F * (tau_sol_xx[t0][x - time + 7][y - time + 8][z + 8] - tau_sol_xx[t0][x - time + 10][y - time + 8][z + 8]) + 7.8324454477134e-1F * (-tau_sol_xx[t0][x - time + 8][y - time + 8][z + 8] + tau_sol_xx[t0][x - time + 9][y - time + 8][z + 8])) + r25 * (4.56702358488521e-4F * (tau_sol_xy[t0][x - time + 8][y - time + 4][z + 8] - tau_sol_xy[t0][x - time + 8][y - time + 11][z + 8]) + 6.2659563586471e-3F * (-tau_sol_xy[t0][x - time + 8][y - time + 5][z + 8] + tau_sol_xy[t0][x - time + 8][y - time + 10][z + 8]) + 5.22163029879319e-2F * (tau_sol_xy[t0][x - time + 8][y - time + 6][z + 8] - tau_sol_xy[t0][x - time + 8][y - time + 9][z + 8]) + 7.8324454477134e-1F * (-tau_sol_xy[t0][x - time + 8][y - time + 7][z + 8] + tau_sol_xy[t0][x - time + 8][y - time + 8][z + 8])) + r26 * (4.56702358488521e-4F * (tau_sol_xz[t0][x - time + 8][y - time + 8][z + 4] - tau_sol_xz[t0][x - time + 8][y - time + 8][z + 11]) + 6.2659563586471e-3F * (-tau_sol_xz[t0][x - time + 8][y - time + 8][z + 5] + tau_sol_xz[t0][x - time + 8][y - time + 8][z + 10]) + 5.22163029879319e-2F * (tau_sol_xz[t0][x - time + 8][y - time + 8][z + 6] - tau_sol_xz[t0][x - time + 8][y - time + 8][z + 9]) + 7.8324454477134e-1F * (-tau_sol_xz[t0][x - time + 8][y - time + 8][z + 7] + tau_sol_xz[t0][x - time + 8][y - time + 8][z + 8])) + v_sol_x[t0][x - time + 8][y - time + 8][z + 8]; v_sol_y[t1][x - time + 8][y - time + 8][z + 8] = r24 * (4.56702358488521e-4F * (tau_sol_xy[t0][x - time + 4][y - time + 8][z + 8] - tau_sol_xy[t0][x - time + 11][y - time + 8][z + 8]) + 6.2659563586471e-3F * (-tau_sol_xy[t0][x - time + 5][y - time + 8][z + 8] + tau_sol_xy[t0][x - time + 10][y - time + 8][z + 8]) + 5.22163029879319e-2F * (tau_sol_xy[t0][x - time + 6][y - time + 8][z + 8] - tau_sol_xy[t0][x - time + 9][y - time + 8][z + 8]) + 7.8324454477134e-1F * (-tau_sol_xy[t0][x - time + 7][y - time + 8][z + 8] + tau_sol_xy[t0][x - time + 8][y - time + 8][z + 8])) + r25 * (4.56702358488521e-4F * (tau_sol_yy[t0][x - time + 8][y - time + 5][z + 8] - tau_sol_yy[t0][x - time + 8][y - time + 12][z + 8]) + 6.2659563586471e-3F * (-tau_sol_yy[t0][x - time + 8][y - time + 6][z + 8] + tau_sol_yy[t0][x - time + 8][y - time + 11][z + 8]) + 5.22163029879319e-2F * (tau_sol_yy[t0][x - time + 8][y - time + 7][z + 8] - tau_sol_yy[t0][x - time + 8][y - time + 10][z + 8]) + 7.8324454477134e-1F * (-tau_sol_yy[t0][x - time + 8][y - time + 8][z + 8] + tau_sol_yy[t0][x - time + 8][y - time + 9][z + 8])) + r26 * (4.56702358488521e-4F * (tau_sol_yz[t0][x - time + 8][y - time + 8][z + 4] - tau_sol_yz[t0][x - time + 8][y - time + 8][z + 11]) + 6.2659563586471e-3F * (-tau_sol_yz[t0][x - time + 8][y - time + 8][z + 5] + tau_sol_yz[t0][x - time + 8][y - time + 8][z + 10]) + 5.22163029879319e-2F * (tau_sol_yz[t0][x - time + 8][y - time + 8][z + 6] - tau_sol_yz[t0][x - time + 8][y - time + 8][z + 9]) + 7.8324454477134e-1F * (-tau_sol_yz[t0][x - time + 8][y - time + 8][z + 7] + tau_sol_yz[t0][x - time + 8][y - time + 8][z + 8])) + v_sol_y[t0][x - time + 8][y - time + 8][z + 8]; v_sol_z[t1][x - time + 8][y - time + 8][z + 8] = r24 * (4.56702358488521e-4F * (tau_sol_xz[t0][x - time + 4][y - time + 8][z + 8] - tau_sol_xz[t0][x - time + 11][y - time + 8][z + 8]) + 6.2659563586471e-3F * (-tau_sol_xz[t0][x - time + 5][y - time + 8][z + 8] + tau_sol_xz[t0][x - time + 10][y - time + 8][z + 8]) + 
5.22163029879319e-2F * (tau_sol_xz[t0][x - time + 6][y - time + 8][z + 8] - tau_sol_xz[t0][x - time + 9][y - time + 8][z + 8]) + 7.8324454477134e-1F * (-tau_sol_xz[t0][x - time + 7][y - time + 8][z + 8] + tau_sol_xz[t0][x - time + 8][y - time + 8][z + 8])) + r25 * (4.56702358488521e-4F * (tau_sol_yz[t0][x - time + 8][y - time + 4][z + 8] - tau_sol_yz[t0][x - time + 8][y - time + 11][z + 8]) + 6.2659563586471e-3F * (-tau_sol_yz[t0][x - time + 8][y - time + 5][z + 8] + tau_sol_yz[t0][x - time + 8][y - time + 10][z + 8]) + 5.22163029879319e-2F * (tau_sol_yz[t0][x - time + 8][y - time + 6][z + 8] - tau_sol_yz[t0][x - time + 8][y - time + 9][z + 8]) + 7.8324454477134e-1F * (-tau_sol_yz[t0][x - time + 8][y - time + 7][z + 8] + tau_sol_yz[t0][x - time + 8][y - time + 8][z + 8])) + r26 * (4.56702358488521e-4F * (tau_sol_zz[t0][x - time + 8][y - time + 8][z + 5] - tau_sol_zz[t0][x - time + 8][y - time + 8][z + 12]) + 6.2659563586471e-3F * (-tau_sol_zz[t0][x - time + 8][y - time + 8][z + 6] + tau_sol_zz[t0][x - time + 8][y - time + 8][z + 11]) + 5.22163029879319e-2F * (tau_sol_zz[t0][x - time + 8][y - time + 8][z + 7] - tau_sol_zz[t0][x - time + 8][y - time + 8][z + 10]) + 7.8324454477134e-1F * (-tau_sol_zz[t0][x - time + 8][y - time + 8][z + 8] + tau_sol_zz[t0][x - time + 8][y - time + 8][z + 9])) + v_sol_z[t0][x - time + 8][y - time + 8][z + 8]; } } } } } } #pragma omp parallel num_threads(nthreads) { #pragma omp for collapse(2) schedule(dynamic, 1) for (int x0_blk0 = max((x_m + time), xb - 4); x0_blk0 <= +min((x_M + time), (xb - 4 + xb_size)); x0_blk0 += x0_blk0_size) { for (int y0_blk0 = max((y_m + time), yb - 4); y0_blk0 <= +min((y_M + time), (yb - 4 + yb_size)); y0_blk0 += y0_blk0_size) { for (int x = x0_blk0; x <= min(min((x_M + time), (xb - 4 + xb_size - 1)), (x0_blk0 + x0_blk0_size - 1)); x++) { for (int y = y0_blk0; y <= min(min((y_M + time), (yb - 4 + yb_size - 1)), (y0_blk0 + y0_blk0_size - 1)); y++) { //printf(" Updating stress x %d \n", x - time + 4); #pragma omp simd aligned(tau_sol_xx, tau_sol_xz, tau_sol_zz, v_sol_x, v_sol_z : 32) for (int z = z_m; z <= z_M; z += 1) { //printf(" Updating x %d z: %d \n", x - time + 4, z + 4); float r47 = -v_sol_z[t1][x - time + 8][y - time + 8][z + 8]; float r46 = -v_sol_y[t1][x - time + 8][y - time + 8][z + 8]; float r45 = -v_sol_x[t1][x - time + 8][y - time + 8][z + 8]; float r44 = -v_sol_y[t1][x - time + 8][y - time + 7][z + 8] + v_sol_y[t1][x - time + 8][y - time + 8][z + 8]; float r43 = v_sol_y[t1][x - time + 8][y - time + 6][z + 8] - v_sol_y[t1][x - time + 8][y - time + 9][z + 8]; float r42 = -v_sol_y[t1][x - time + 8][y - time + 5][z + 8] + v_sol_y[t1][x - time + 8][y - time + 10][z + 8]; float r41 = v_sol_y[t1][x - time + 8][y - time + 4][z + 8] - v_sol_y[t1][x - time + 8][y - time + 11][z + 8]; float r40 = -v_sol_z[t1][x - time + 8][y - time + 8][z + 7] + v_sol_z[t1][x - time + 8][y - time + 8][z + 8]; float r39 = v_sol_z[t1][x - time + 8][y - time + 8][z + 6] - v_sol_z[t1][x - time + 8][y - time + 8][z + 9]; float r38 = -v_sol_z[t1][x - time + 8][y - time + 8][z + 5] + v_sol_z[t1][x - time + 8][y - time + 8][z + 10]; float r37 = v_sol_z[t1][x - time + 8][y - time + 8][z + 4] - v_sol_z[t1][x - time + 8][y - time + 8][z + 11]; float r36 = -v_sol_x[t1][x - time + 7][y - time + 8][z + 8] + v_sol_x[t1][x - time + 8][y - time + 8][z + 8]; float r35 = v_sol_x[t1][x - time + 6][y - time + 8][z + 8] - v_sol_x[t1][x - time + 9][y - time + 8][z + 8]; float r34 = -v_sol_x[t1][x - time + 5][y - time + 8][z + 8] + v_sol_x[t1][x - time + 10][y - time + 
8][z + 8]; float r33 = v_sol_x[t1][x - time + 4][y - time + 8][z + 8] - v_sol_x[t1][x - time + 11][y - time + 8][z + 8]; float r32 = 1.0 / h_y; float r31 = 1.0 / h_z; float r30 = 1.0 / h_x; float r29 = r30 * (2.95943128300561e-3F * r33 + 4.06033972040332e-2F * r34 + 3.38361643361799e-1F * r35 + 5.07542465011829F * r36); float r28 = r31 * (2.95943128300561e-3F * r37 + 4.06033972040332e-2F * r38 + 3.38361643361799e-1F * r39 + 5.07542465011829F * r40); float r27 = r32 * (2.95943128300561e-3F * r41 + 4.06033972040332e-2F * r42 + 3.38361643361799e-1F * r43 + 5.07542465011829F * r44); tau_sol_xx[t1][x - time + 8][y - time + 8][z + 8] = r27 + r28 + r30 * (5.91886256601123e-3F * r33 + 8.12067944080664e-2F * r34 + 6.76723286723597e-1F * r35 + 1.01508493002366e+1F * r36) + tau_sol_xx[t0][x - time + 8][y - time + 8][z + 8]; tau_sol_xy[t1][x - time + 8][y - time + 8][z + 8] = r30 * (2.53771232505914F * (r46 + v_sol_y[t1][x - time + 9][y - time + 8][z + 8]) + 1.47971564150281e-3F * (v_sol_y[t1][x - time + 5][y - time + 8][z + 8] - v_sol_y[t1][x - time + 12][y - time + 8][z + 8]) + 2.03016986020166e-2F * (-v_sol_y[t1][x - time + 6][y - time + 8][z + 8] + v_sol_y[t1][x - time + 11][y - time + 8][z + 8]) + 1.69180821680899e-1F * (v_sol_y[t1][x - time + 7][y - time + 8][z + 8] - v_sol_y[t1][x - time + 10][y - time + 8][z + 8])) + r32 * (2.53771232505914F * (r45 + v_sol_x[t1][x - time + 8][y - time + 9][z + 8]) + 1.47971564150281e-3F * (v_sol_x[t1][x - time + 8][y - time + 5][z + 8] - v_sol_x[t1][x - time + 8][y - time + 12][z + 8]) + 2.03016986020166e-2F * (-v_sol_x[t1][x - time + 8][y - time + 6][z + 8] + v_sol_x[t1][x - time + 8][y - time + 11][z + 8]) + 1.69180821680899e-1F * (v_sol_x[t1][x - time + 8][y - time + 7][z + 8] - v_sol_x[t1][x - time + 8][y - time + 10][z + 8])) + tau_sol_xy[t0][x - time + 8][y - time + 8][z + 8]; tau_sol_xz[t1][x - time + 8][y - time + 8][z + 8] = r30 * (2.53771232505914F * (r47 + v_sol_z[t1][x - time + 9][y - time + 8][z + 8]) + 1.47971564150281e-3F * (v_sol_z[t1][x - time + 5][y - time + 8][z + 8] - v_sol_z[t1][x - time + 12][y - time + 8][z + 8]) + 2.03016986020166e-2F * (-v_sol_z[t1][x - time + 6][y - time + 8][z + 8] + v_sol_z[t1][x - time + 11][y - time + 8][z + 8]) + 1.69180821680899e-1F * (v_sol_z[t1][x - time + 7][y - time + 8][z + 8] - v_sol_z[t1][x - time + 10][y - time + 8][z + 8])) + r31 * (2.53771232505914F * (r45 + v_sol_x[t1][x - time + 8][y - time + 8][z + 9]) + 1.47971564150281e-3F * (v_sol_x[t1][x - time + 8][y - time + 8][z + 5] - v_sol_x[t1][x - time + 8][y - time + 8][z + 12]) + 2.03016986020166e-2F * (-v_sol_x[t1][x - time + 8][y - time + 8][z + 6] + v_sol_x[t1][x - time + 8][y - time + 8][z + 11]) + 1.69180821680899e-1F * (v_sol_x[t1][x - time + 8][y - time + 8][z + 7] - v_sol_x[t1][x - time + 8][y - time + 8][z + 10])) + tau_sol_xz[t0][x - time + 8][y - time + 8][z + 8]; tau_sol_yy[t1][x - time + 8][y - time + 8][z + 8] = r28 + r29 + r32 * (5.91886256601123e-3F * r41 + 8.12067944080664e-2F * r42 + 6.76723286723597e-1F * r43 + 1.01508493002366e+1F * r44) + tau_sol_yy[t0][x - time + 8][y - time + 8][z + 8]; tau_sol_yz[t1][x - time + 8][y - time + 8][z + 8] = r31 * (2.53771232505914F * (r46 + v_sol_y[t1][x - time + 8][y - time + 8][z + 9]) + 1.47971564150281e-3F * (v_sol_y[t1][x - time + 8][y - time + 8][z + 5] - v_sol_y[t1][x - time + 8][y - time + 8][z + 12]) + 2.03016986020166e-2F * (-v_sol_y[t1][x - time + 8][y - time + 8][z + 6] + v_sol_y[t1][x - time + 8][y - time + 8][z + 11]) + 1.69180821680899e-1F * (v_sol_y[t1][x - time + 8][y - time + 8][z + 
7] - v_sol_y[t1][x - time + 8][y - time + 8][z + 10])) + r32 * (2.53771232505914F * (r47 + v_sol_z[t1][x - time + 8][y - time + 9][z + 8]) + 1.47971564150281e-3F * (v_sol_z[t1][x - time + 8][y - time + 5][z + 8] - v_sol_z[t1][x - time + 8][y - time + 12][z + 8]) + 2.03016986020166e-2F * (-v_sol_z[t1][x - time + 8][y - time + 6][z + 8] + v_sol_z[t1][x - time + 8][y - time + 11][z + 8]) + 1.69180821680899e-1F * (v_sol_z[t1][x - time + 8][y - time + 7][z + 8] - v_sol_z[t1][x - time + 8][y - time + 10][z + 8])) + tau_sol_yz[t0][x - time + 8][y - time + 8][z + 8]; tau_sol_zz[t1][x - time + 8][y - time + 8][z + 8] = r27 + r29 + r31 * (5.91886256601123e-3F * r37 + 8.12067944080664e-2F * r38 + 6.76723286723597e-1F * r39 + 1.01508493002366e+1F * r40) + tau_sol_zz[t0][x - time + 8][y - time + 8][z + 8]; } for (int sp_zi = sp_zi_m; sp_zi <= nnz_sp_source_mask[x - time][y - time] - 1; sp_zi += 1) { //printf("\n Source_injection at : "); int zind = sp_source_mask[x - time][y - time][sp_zi]; float r0 = save_src_fxx[((time / sf) % (time_M - time_m + 1))][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind]; float r1 = save_src_fyy[((time / sf) % (time_M - time_m + 1))][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind]; float r2 = save_src_fzz[((time / sf) % (time_M - time_m + 1))][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind]; tau_sol_xx[t1][x - time + 8][y - time + 8][zind + 8] += r0; tau_sol_yy[t1][x - time + 8][y - time + 8][zind + 8] += r1; tau_sol_zz[t1][x - time + 8][y - time + 8][zind + 8] += r2; //printf(" Time %d , at : %d, %d \n", tw, x - time + 4, zind + 4); } } } } } } } } } } /* End section0 */ gettimeofday(&end_section0, NULL); timers->section0 += (double)(end_section0.tv_sec - start_section0.tv_sec) + (double)(end_section0.tv_usec - start_section0.tv_usec) / 1000000; return 0; }
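The generated kernel above double-buffers each field in time: `t1` indexes the buffer being written and `t0` the one being read, both derived from the logical step `(time / sf) % (time_M - time_m + 1)`. A minimal standalone sketch of that modulo-2 indexing (the bounds below are hypothetical, chosen only for illustration):

#include <stdio.h>

int main(void)
{
    /* Hypothetical loop bounds, chosen only to illustrate the indexing. */
    const int time_m = 0, time_M = 4, sf = 8;

    for (int time = time_m; time <= sf * (time_M - time_m); time += sf) {
        int tw = (time / sf) % (time_M - time_m + 1); /* logical time step */
        int t0 = (tw + 1) % 2;                        /* buffer being read */
        int t1 = tw % 2;                              /* buffer being written */
        printf("step %d: read buffer %d, write buffer %d\n", tw, t0, t1);
    }
    return 0;
}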
omp_hello.c
/* Simple "Hello World" program */ #include <stdio.h> #include <omp.h> int main() { int nthreads, tid; #pragma omp parallel private(nthreads, tid) { tid = omp_get_thread_num(); printf("Hello, world! I am thread %d\n", tid); #pragma omp barrier if (tid == 0) { nthreads = omp_get_num_threads(); printf("Number of threads = %d\n", nthreads); } } return 0; }
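Because `nthreads` and `tid` are declared `private`, each thread owns its own copies, and the explicit `barrier` guarantees every greeting is printed before thread 0 reports the team size. With GCC or Clang, build with OpenMP enabled, e.g. `gcc -fopenmp omp_hello.c -o omp_hello`; the thread count can then be set at run time via the `OMP_NUM_THREADS` environment variable.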
rose_teams.c
#include <stdlib.h>
#include <assert.h>
#include <omp.h>
#include "libxomp.h"

int main(int argc, char **argv)
{
    int status = 0;
    XOMP_init(argc, argv);
    int res = 0;
    int n = 10;
#pragma omp teams num_teams(n) reduction(+ : res)
    {
        res = omp_get_team_num();
        if (omp_get_team_num() == 0)
            n = omp_get_num_teams();
    }
    /* Sum of the first n-1 natural numbers */
    assert(res == n * (n - 1) / 2);
    XOMP_terminate(status);
    return status;
}
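For comparison, here is the same sum-of-ids check written directly against the OpenMP runtime, without the ROSE/XOMP harness — a minimal sketch using a thread-level `parallel` reduction in place of `teams`:

#include <assert.h>
#include <omp.h>

int main(void)
{
    int res = 0; /* reduction target: sum of all thread ids */
    int n = 0;   /* actual team size, recorded by one thread */

#pragma omp parallel reduction(+ : res)
    {
        res = omp_get_thread_num();
#pragma omp single
        n = omp_get_num_threads();
    }

    /* Thread ids are 0 .. n-1, so their sum is n*(n-1)/2. */
    assert(res == n * (n - 1) / 2);
    return 0;
}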
convolution_3x3_pack4.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd64_transform_kernel_pack4_neon(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch) { // winograd63 transform kernel Mat kernel_tm; kernel_tm.create(8 * 8, inch, outch); const float ktm[8][3] = { {1.0f, 0.0f, 0.0f}, {-2.0f / 9, -2.0f / 9, -2.0f / 9}, {-2.0f / 9, 2.0f / 9, -2.0f / 9}, {1.0f / 90, 1.0f / 45, 2.0f / 45}, {1.0f / 90, -1.0f / 45, 2.0f / 45}, {1.0f / 45, 1.0f / 90, 1.0f / 180}, {1.0f / 45, -1.0f / 90, 1.0f / 180}, {0.0f, 0.0f, 1.0f} }; #pragma omp parallel for for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel, transposed const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[8][3]; for (int i = 0; i < 8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // v for (int j = 0; j < 8; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 8; i++) { kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // interleave // src = 64-inch-outch // dst = 4b-4a-inch/4a-64-outch/4b; #if __aarch64__ kernel_tm_pack4.create(2 * inch / 4, 64, (outch / 4) / 2 + (outch / 4) % 2, (size_t)4u * 16, 16); #else kernel_tm_pack4.create(inch / 4, 64, outch / 4, (size_t)4u * 16, 16); #endif int q = 0; #if __aarch64__ for (; q + 7 < outch; q += 8) { const Mat k0 = kernel_tm.channel(q); const Mat k1 = kernel_tm.channel(q + 1); const Mat k2 = kernel_tm.channel(q + 2); const Mat k3 = kernel_tm.channel(q + 3); const Mat k4 = kernel_tm.channel(q + 4); const Mat k5 = kernel_tm.channel(q + 5); const Mat k6 = kernel_tm.channel(q + 6); const Mat k7 = kernel_tm.channel(q + 7); Mat g0 = kernel_tm_pack4.channel(q / 8); for (int k = 0; k < 64; k++) { float* g00 = g0.row(k); for (int p = 0; p + 3 < inch; p += 4) { const float* k00 = k0.row(p); const float* k01 = k0.row(p + 1); const float* k02 = k0.row(p + 2); const float* k03 = k0.row(p + 3); const float* k10 = k1.row(p); const float* k11 = k1.row(p + 1); const float* k12 = k1.row(p + 2); const float* k13 = k1.row(p + 3); const float* k20 = k2.row(p); const float* k21 = k2.row(p + 1); const float* k22 = k2.row(p + 2); const float* k23 = k2.row(p + 3); const float* k30 = k3.row(p); const float* k31 = k3.row(p + 1); const float* k32 = k3.row(p + 2); const float* k33 = k3.row(p + 3); const float* k40 = k4.row(p); const float* k41 = k4.row(p + 1); const float* k42 = k4.row(p + 2); const float* k43 = k4.row(p + 3); const float* k50 = k5.row(p); const float* k51 = k5.row(p + 1); const float* k52 = k5.row(p + 2); const float* k53 = 
k5.row(p + 3); const float* k60 = k6.row(p); const float* k61 = k6.row(p + 1); const float* k62 = k6.row(p + 2); const float* k63 = k6.row(p + 3); const float* k70 = k7.row(p); const float* k71 = k7.row(p + 1); const float* k72 = k7.row(p + 2); const float* k73 = k7.row(p + 3); g00[0] = k00[k]; g00[1] = k10[k]; g00[2] = k20[k]; g00[3] = k30[k]; g00[4] = k40[k]; g00[5] = k50[k]; g00[6] = k60[k]; g00[7] = k70[k]; g00[8] = k01[k]; g00[9] = k11[k]; g00[10] = k21[k]; g00[11] = k31[k]; g00[12] = k41[k]; g00[13] = k51[k]; g00[14] = k61[k]; g00[15] = k71[k]; g00[16] = k02[k]; g00[17] = k12[k]; g00[18] = k22[k]; g00[19] = k32[k]; g00[20] = k42[k]; g00[21] = k52[k]; g00[22] = k62[k]; g00[23] = k72[k]; g00[24] = k03[k]; g00[25] = k13[k]; g00[26] = k23[k]; g00[27] = k33[k]; g00[28] = k43[k]; g00[29] = k53[k]; g00[30] = k63[k]; g00[31] = k73[k]; g00 += 32; } } } #endif // __aarch64__ for (; q + 3 < outch; q += 4) { const Mat k0 = kernel_tm.channel(q); const Mat k1 = kernel_tm.channel(q + 1); const Mat k2 = kernel_tm.channel(q + 2); const Mat k3 = kernel_tm.channel(q + 3); #if __aarch64__ Mat g0 = kernel_tm_pack4.channel(q / 8 + (q % 8) / 4); #else Mat g0 = kernel_tm_pack4.channel(q / 4); #endif for (int k = 0; k < 64; k++) { float* g00 = g0.row(k); for (int p = 0; p + 3 < inch; p += 4) { const float* k00 = k0.row(p); const float* k01 = k0.row(p + 1); const float* k02 = k0.row(p + 2); const float* k03 = k0.row(p + 3); const float* k10 = k1.row(p); const float* k11 = k1.row(p + 1); const float* k12 = k1.row(p + 2); const float* k13 = k1.row(p + 3); const float* k20 = k2.row(p); const float* k21 = k2.row(p + 1); const float* k22 = k2.row(p + 2); const float* k23 = k2.row(p + 3); const float* k30 = k3.row(p); const float* k31 = k3.row(p + 1); const float* k32 = k3.row(p + 2); const float* k33 = k3.row(p + 3); g00[0] = k00[k]; g00[1] = k10[k]; g00[2] = k20[k]; g00[3] = k30[k]; g00[4] = k01[k]; g00[5] = k11[k]; g00[6] = k21[k]; g00[7] = k31[k]; g00[8] = k02[k]; g00[9] = k12[k]; g00[10] = k22[k]; g00[11] = k32[k]; g00[12] = k03[k]; g00[13] = k13[k]; g00[14] = k23[k]; g00[15] = k33[k]; g00 += 16; } } } } static void conv3x3s1_winograd64_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); const float* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 
7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[8][8][4]; // tile for (int i = 0; i < h_tm / 8; i++) { for (int j = 0; j < w_tm / 8; j++) { const float* r0 = img0.row(i * 6) + (j * 6) * 4; for (int m = 0; m < 8; m++) { float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r01 = vld1q_f32(r0 + 4); float32x4_t _r02 = vld1q_f32(r0 + 8); float32x4_t _r03 = vld1q_f32(r0 + 12); float32x4_t _r04 = vld1q_f32(r0 + 16); float32x4_t _r05 = vld1q_f32(r0 + 20); float32x4_t _r06 = vld1q_f32(r0 + 24); float32x4_t _r07 = vld1q_f32(r0 + 28); float32x4_t _tmp0m = vmlaq_n_f32(vsubq_f32(_r00, _r06), vsubq_f32(_r04, _r02), 5.25f); float32x4_t _tmp7m = vmlaq_n_f32(vsubq_f32(_r07, _r01), vsubq_f32(_r03, _r05), 5.25f); vst1q_f32(tmp[0][m], _tmp0m); vst1q_f32(tmp[7][m], _tmp7m); // tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25; // tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25; float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_r02, _r06), _r04, 4.25f); float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_r01, _r05), _r03, 4.25f); // float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25); // float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25); float32x4_t _tmp1m = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _tmp2m = vsubq_f32(_tmp12a, _tmp12b); vst1q_f32(tmp[1][m], _tmp1m); vst1q_f32(tmp[2][m], _tmp2m); // tmp[1][m] = tmp12a + tmp12b; // tmp[2][m] = tmp12a - tmp12b; float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_r06, _r02, 0.25f), _r04, 1.25f); float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 0.5f), _r03, 2.5f), _r05, 2.f); // float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25); // float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2); float32x4_t _tmp3m = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _tmp4m = vsubq_f32(_tmp34a, _tmp34b); vst1q_f32(tmp[3][m], _tmp3m); vst1q_f32(tmp[4][m], _tmp4m); // tmp[3][m] = tmp34a + tmp34b; // tmp[4][m] = tmp34a - tmp34b; float32x4_t _tmp56a = vmlaq_n_f32(_r06, vmlsq_n_f32(_r02, _r04, 1.25f), 4.f); float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 2.f), _r03, 2.5f), _r05, 0.5f); // float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4); // float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5); float32x4_t _tmp5m = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _tmp6m = vsubq_f32(_tmp56a, _tmp56b); vst1q_f32(tmp[5][m], _tmp5m); vst1q_f32(tmp[6][m], _tmp6m); // tmp[5][m] = tmp56a + tmp56b; // tmp[6][m] = tmp56a - tmp56b; r0 += w * 4; } float* r0_tm_0 = (float*)img0_tm + (i * w_tm / 8 + j) * 4; float* r0_tm_1 = r0_tm_0 + tiles * 4; float* r0_tm_2 = r0_tm_0 + tiles * 8; float* r0_tm_3 = r0_tm_0 + tiles * 12; float* r0_tm_4 = r0_tm_0 + tiles * 16; float* r0_tm_5 = r0_tm_0 + tiles * 20; float* r0_tm_6 = r0_tm_0 + tiles * 24; float* r0_tm_7 = r0_tm_0 + tiles * 28; for (int m = 0; m < 8; m++) { float32x4_t _tmp00 = vld1q_f32(tmp[m][0]); float32x4_t _tmp01 = vld1q_f32(tmp[m][1]); float32x4_t _tmp02 = vld1q_f32(tmp[m][2]); float32x4_t _tmp03 = vld1q_f32(tmp[m][3]); float32x4_t 
_tmp04 = vld1q_f32(tmp[m][4]); float32x4_t _tmp05 = vld1q_f32(tmp[m][5]); float32x4_t _tmp06 = vld1q_f32(tmp[m][6]); float32x4_t _tmp07 = vld1q_f32(tmp[m][7]); float32x4_t _r0tm0 = vmlaq_n_f32(vsubq_f32(_tmp00, _tmp06), vsubq_f32(_tmp04, _tmp02), 5.25f); float32x4_t _r0tm7 = vmlaq_n_f32(vsubq_f32(_tmp07, _tmp01), vsubq_f32(_tmp03, _tmp05), 5.25f); // r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25; // r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25; float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_tmp02, _tmp06), _tmp04, 4.25f); float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_tmp01, _tmp05), _tmp03, 4.25f); // float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25); // float tmp12b = (tmp0[1] + tmp0[5] - tmp0[3] * 4.25); float32x4_t _r0tm1 = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _r0tm2 = vsubq_f32(_tmp12a, _tmp12b); // r0_tm[1] = tmp12a + tmp12b; // r0_tm[2] = tmp12a - tmp12b; float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f); float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f); // float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25); // float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2); float32x4_t _r0tm3 = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _r0tm4 = vsubq_f32(_tmp34a, _tmp34b); // r0_tm[3] = tmp34a + tmp34b; // r0_tm[4] = tmp34a - tmp34b; float32x4_t _tmp56a = vmlaq_n_f32(_tmp06, vmlsq_n_f32(_tmp02, _tmp04, 1.25f), 4.f); float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f); // float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4); // float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5); float32x4_t _r0tm5 = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _r0tm6 = vsubq_f32(_tmp56a, _tmp56b); // r0_tm[5] = tmp56a + tmp56b; // r0_tm[6] = tmp56a - tmp56b; vst1q_f32(r0_tm_0, _r0tm0); vst1q_f32(r0_tm_1, _r0tm1); vst1q_f32(r0_tm_2, _r0tm2); vst1q_f32(r0_tm_3, _r0tm3); vst1q_f32(r0_tm_4, _r0tm4); vst1q_f32(r0_tm_5, _r0tm5); vst1q_f32(r0_tm_6, _r0tm6); vst1q_f32(r0_tm_7, _r0tm7); r0_tm_0 += tiles * 32; r0_tm_1 += tiles * 32; r0_tm_2 += tiles * 32; r0_tm_3 += tiles * 32; r0_tm_4 += tiles * 32; r0_tm_5 += tiles * 32; r0_tm_6 += tiles * 32; r0_tm_7 += tiles * 32; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm / 8 * w_tm / 8; // permute // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; #if __aarch64__ if (tiles >= 12) bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, elemsize, elempack, opt.workspace_allocator); #else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 
+ (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, elemsize, elempack, opt.workspace_allocator); #endif #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { float* tm2p = tm2.row(i / 12); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v8.4s, v9.4s, v10.4s, v11.4s}, [%0] \n" "st1 {v0.4s}, [%1], #16 \n" "st1 {v4.4s}, [%1], #16 \n" "st1 {v8.4s}, [%1], #16 \n" "sub %0, %0, #128 \n" "st1 {v1.4s}, [%1], #16 \n" "st1 {v5.4s}, [%1], #16 \n" "st1 {v9.4s}, [%1], #16 \n" "st1 {v2.4s}, [%1], #16 \n" "st1 {v6.4s}, [%1], #16 \n" "st1 {v10.4s}, [%1], #16 \n" "st1 {v3.4s}, [%1], #16 \n" "st1 {v7.4s}, [%1], #16 \n" "st1 {v11.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"); r0 += bottom_blob_tm.cstep * 4; } } #endif for (; i + 7 < tiles; i += 8) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8); #else float* tm2p = tm2.row(i / 8); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0] \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" "sub %0, %0, #64 \n" "st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"); #else asm volatile( "pld [%0, #512] \n" "vldm %0!, {d0-d7} \n" "pld [%0, #512] \n" "vldm %0, {d16-d23} \n" // transpose 8x4 "vtrn.32 q0, q1 \n" "vtrn.32 q2, q3 \n" "vtrn.32 q8, q9 \n" "vtrn.32 q10, q11 \n" "vswp d1, d4 \n" "vswp d3, d6 \n" "vswp d17, d20 \n" "vswp d19, d22 \n" "vswp q1, q8 \n" "vswp q3, q10 \n" "vst1.f32 {d0-d3}, [%1 :128]! \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "sub %0, %0, #64 \n" "vst1.f32 {d4-d7}, [%1 :128]! \n" "vst1.f32 {d20-d23}, [%1 :128]! 
\n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11"); #endif r0 += bottom_blob_tm.cstep * 4; } } for (; i + 3 < tiles; i += 4) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0] \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3"); #else asm volatile( "pld [%0, #512] \n" "vldm %0, {d0-d7} \n" "vstm %1!, {d0-d7} \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1", "q2", "q3"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } for (; i + 1 < tiles; i += 2) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.4s, v1.4s}, [%0] \n" "st1 {v0.4s, v1.4s}, [%1], #32 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1"); #else asm volatile( "pld [%0, #256] \n" "vld1.f32 {d0-d3}, [%0 :128] \n" "vst1.f32 {d0-d3}, [%1 :128]! \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } for (; i < tiles; i++) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.4s}, [%0] \n" "st1 {v0.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0"); #else asm volatile( "pld [%0, #128] \n" "vld1.f32 {d0-d1}, [%0 :128] \n" "vst1.f32 {d0-d1}, [%1 :128]! 
\n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 64, outch, elemsize, elempack, opt.workspace_allocator); int nn_outch = 0; int remain_outch_start = 0; #if __ARM_NEON && __aarch64__ nn_outch = outch >> 1; remain_outch_start = nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; float* output0_tm = top_blob_tm.channel(p); float* output1_tm = top_blob_tm.channel(p + 1); const Mat kernel01_tm = kernel_tm.channel(pp); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" // w0011_01 "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v12.4s, v4.4s, v1.s[0] \n" "fmla v13.4s, v4.4s, v1.s[1] \n" "fmla v14.4s, v4.4s, v1.s[2] \n" "fmla v15.4s, v4.4s, v1.s[3] \n" "fmla v16.4s, v4.4s, v2.s[0] \n" "fmla v17.4s, v4.4s, v2.s[1] \n" "fmla v18.4s, v4.4s, v2.s[2] \n" "fmla v19.4s, v4.4s, v2.s[3] \n" "fmla v20.4s, v5.4s, v0.s[0] \n" "fmla v21.4s, v5.4s, v0.s[1] \n" "fmla v22.4s, v5.4s, v0.s[2] \n" "fmla v23.4s, v5.4s, v0.s[3] \n" "fmla v24.4s, v5.4s, v1.s[0] \n" "fmla v25.4s, v5.4s, v1.s[1] \n" "fmla v26.4s, v5.4s, v1.s[2] \n" "fmla v27.4s, v5.4s, v1.s[3] \n" "fmla v28.4s, v5.4s, v2.s[0] \n" "fmla v29.4s, v5.4s, v2.s[1] \n" "fmla v30.4s, v5.4s, v2.s[2] \n" "fmla v31.4s, v5.4s, v2.s[3] \n" "fmla v8.4s, v6.4s, v3.s[0] \n" "fmla v9.4s, v6.4s, v3.s[1] \n" "fmla v10.4s, v6.4s, v3.s[2] \n" "fmla v11.4s, v6.4s, v3.s[3] \n" "fmla v20.4s, v7.4s, v3.s[0] \n" "fmla v21.4s, v7.4s, v3.s[1] \n" "fmla v22.4s, v7.4s, v3.s[2] \n" "fmla v23.4s, v7.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "fmla v12.4s, v6.4s, v0.s[0] \n" "fmla v13.4s, v6.4s, v0.s[1] \n" "fmla v14.4s, v6.4s, v0.s[2] \n" "fmla v15.4s, v6.4s, v0.s[3] \n" "fmla v16.4s, v6.4s, v1.s[0] \n" "fmla v17.4s, v6.4s, v1.s[1] \n" "fmla v18.4s, v6.4s, v1.s[2] \n" "fmla v19.4s, v6.4s, v1.s[3] \n" "fmla v24.4s, v7.4s, v0.s[0] \n" "fmla v25.4s, v7.4s, v0.s[1] \n" "fmla v26.4s, v7.4s, v0.s[2] \n" "fmla v27.4s, v7.4s, v0.s[3] \n" "fmla v28.4s, v7.4s, v1.s[0] \n" "fmla v29.4s, v7.4s, v1.s[1] \n" "fmla v30.4s, v7.4s, v1.s[2] \n" "fmla v31.4s, v7.4s, 
v1.s[3] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" // w2233_01 "fmla v8.4s, v4.4s, v2.s[0] \n" "fmla v9.4s, v4.4s, v2.s[1] \n" "fmla v10.4s, v4.4s, v2.s[2] \n" "fmla v11.4s, v4.4s, v2.s[3] \n" "fmla v12.4s, v4.4s, v3.s[0] \n" "fmla v13.4s, v4.4s, v3.s[1] \n" "fmla v14.4s, v4.4s, v3.s[2] \n" "fmla v15.4s, v4.4s, v3.s[3] \n" "fmla v20.4s, v5.4s, v2.s[0] \n" "fmla v21.4s, v5.4s, v2.s[1] \n" "fmla v22.4s, v5.4s, v2.s[2] \n" "fmla v23.4s, v5.4s, v2.s[3] \n" "fmla v24.4s, v5.4s, v3.s[0] \n" "fmla v25.4s, v5.4s, v3.s[1] \n" "fmla v26.4s, v5.4s, v3.s[2] \n" "fmla v27.4s, v5.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "fmla v16.4s, v4.4s, v0.s[0] \n" "fmla v17.4s, v4.4s, v0.s[1] \n" "fmla v18.4s, v4.4s, v0.s[2] \n" "fmla v19.4s, v4.4s, v0.s[3] \n" "fmla v28.4s, v5.4s, v0.s[0] \n" "fmla v29.4s, v5.4s, v0.s[1] \n" "fmla v30.4s, v5.4s, v0.s[2] \n" "fmla v31.4s, v5.4s, v0.s[3] \n" "fmla v8.4s, v6.4s, v1.s[0] \n" "fmla v9.4s, v6.4s, v1.s[1] \n" "fmla v10.4s, v6.4s, v1.s[2] \n" "fmla v11.4s, v6.4s, v1.s[3] \n" "fmla v12.4s, v6.4s, v2.s[0] \n" "fmla v13.4s, v6.4s, v2.s[1] \n" "fmla v14.4s, v6.4s, v2.s[2] \n" "fmla v15.4s, v6.4s, v2.s[3] \n" "fmla v16.4s, v6.4s, v3.s[0] \n" "fmla v17.4s, v6.4s, v3.s[1] \n" "fmla v18.4s, v6.4s, v3.s[2] \n" "fmla v19.4s, v6.4s, v3.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v20.4s, v7.4s, v1.s[0] \n" "fmla v21.4s, v7.4s, v1.s[1] \n" "fmla v22.4s, v7.4s, v1.s[2] \n" "fmla v23.4s, v7.4s, v1.s[3] \n" "fmla v24.4s, v7.4s, v2.s[0] \n" "fmla v25.4s, v7.4s, v2.s[1] \n" "fmla v26.4s, v7.4s, v2.s[2] \n" "fmla v27.4s, v7.4s, v2.s[3] \n" "fmla v28.4s, v7.4s, v3.s[0] \n" "fmla v29.4s, v7.4s, v3.s[1] \n" "fmla v30.4s, v7.4s, v3.s[2] \n" "fmla v31.4s, v7.4s, v3.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" "st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 7 < tiles; i += 8) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // r4 r5 r6 r7 "fmla v16.4s, 
v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "fmla v20.4s, v8.4s, v4.s[0] \n" "fmla v21.4s, v8.4s, v5.s[0] \n" "fmla v22.4s, v8.4s, v6.s[0] \n" "fmla v23.4s, v8.4s, v7.s[0] \n" "fmla v24.4s, v9.4s, v0.s[0] \n" "fmla v25.4s, v9.4s, v1.s[0] \n" "fmla v26.4s, v9.4s, v2.s[0] \n" "fmla v27.4s, v9.4s, v3.s[0] \n" "fmla v28.4s, v9.4s, v4.s[0] \n" "fmla v29.4s, v9.4s, v5.s[0] \n" "fmla v30.4s, v9.4s, v6.s[0] \n" "fmla v31.4s, v9.4s, v7.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v10.4s, v2.s[1] \n" "fmla v19.4s, v10.4s, v3.s[1] \n" "fmla v20.4s, v10.4s, v4.s[1] \n" "fmla v21.4s, v10.4s, v5.s[1] \n" "fmla v22.4s, v10.4s, v6.s[1] \n" "fmla v23.4s, v10.4s, v7.s[1] \n" "fmla v24.4s, v11.4s, v0.s[1] \n" "fmla v25.4s, v11.4s, v1.s[1] \n" "fmla v26.4s, v11.4s, v2.s[1] \n" "fmla v27.4s, v11.4s, v3.s[1] \n" "fmla v28.4s, v11.4s, v4.s[1] \n" "fmla v29.4s, v11.4s, v5.s[1] \n" "fmla v30.4s, v11.4s, v6.s[1] \n" "fmla v31.4s, v11.4s, v7.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v12.4s, v2.s[2] \n" "fmla v19.4s, v12.4s, v3.s[2] \n" "fmla v20.4s, v12.4s, v4.s[2] \n" "fmla v21.4s, v12.4s, v5.s[2] \n" "fmla v22.4s, v12.4s, v6.s[2] \n" "fmla v23.4s, v12.4s, v7.s[2] \n" "fmla v24.4s, v13.4s, v0.s[2] \n" "fmla v25.4s, v13.4s, v1.s[2] \n" "fmla v26.4s, v13.4s, v2.s[2] \n" "fmla v27.4s, v13.4s, v3.s[2] \n" "fmla v28.4s, v13.4s, v4.s[2] \n" "fmla v29.4s, v13.4s, v5.s[2] \n" "fmla v30.4s, v13.4s, v6.s[2] \n" "fmla v31.4s, v13.4s, v7.s[2] \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v14.4s, v2.s[3] \n" "fmla v19.4s, v14.4s, v3.s[3] \n" "fmla v20.4s, v14.4s, v4.s[3] \n" "fmla v21.4s, v14.4s, v5.s[3] \n" "fmla v22.4s, v14.4s, v6.s[3] \n" "fmla v23.4s, v14.4s, v7.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v24.4s, v15.4s, v0.s[3] \n" "fmla v25.4s, v15.4s, v1.s[3] \n" "fmla v26.4s, v15.4s, v2.s[3] \n" "fmla v27.4s, v15.4s, v3.s[3] \n" "fmla v28.4s, v15.4s, v4.s[3] \n" "fmla v29.4s, v15.4s, v5.s[3] \n" "fmla v30.4s, v15.4s, v6.s[3] \n" "fmla v31.4s, v15.4s, v7.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < tiles; i += 4) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 
"fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "fmla v20.4s, v9.4s, v0.s[0] \n" "fmla v21.4s, v9.4s, v1.s[0] \n" "fmla v22.4s, v9.4s, v2.s[0] \n" "fmla v23.4s, v9.4s, v3.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v10.4s, v2.s[1] \n" "fmla v19.4s, v10.4s, v3.s[1] \n" "fmla v20.4s, v11.4s, v0.s[1] \n" "fmla v21.4s, v11.4s, v1.s[1] \n" "fmla v22.4s, v11.4s, v2.s[1] \n" "fmla v23.4s, v11.4s, v3.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v12.4s, v2.s[2] \n" "fmla v19.4s, v12.4s, v3.s[2] \n" "fmla v20.4s, v13.4s, v0.s[2] \n" "fmla v21.4s, v13.4s, v1.s[2] \n" "fmla v22.4s, v13.4s, v2.s[2] \n" "fmla v23.4s, v13.4s, v3.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v14.4s, v2.s[3] \n" "fmla v19.4s, v14.4s, v3.s[3] \n" "fmla v20.4s, v15.4s, v0.s[3] \n" "fmla v21.4s, v15.4s, v1.s[3] \n" "fmla v22.4s, v15.4s, v2.s[3] \n" "fmla v23.4s, v15.4s, v3.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); } for (; i + 1 < tiles; i += 2) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4s, v1.4s}, [%3], #32 \n" // r0 r1 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v9.4s, v0.s[0] \n" "fmla v19.4s, v9.4s, v1.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v11.4s, v0.s[1] \n" "fmla v19.4s, v11.4s, v1.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v13.4s, v0.s[2] \n" "fmla v19.4s, v13.4s, v1.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v15.4s, v0.s[3] \n" "fmla v19.4s, v15.4s, v1.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s}, [%1], #32 \n" "st1 {v18.4s, v19.4s}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); } for (; i < tiles; i++) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "0: \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.4s}, [%3], #16 \n" // r0 "prfm 
pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v9.4s, v0.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v11.4s, v0.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v13.4s, v0.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v15.4s, v0.s[3] \n" "bne 0b \n" "st1 {v16.4s}, [%1], #16 \n" "st1 {v17.4s}, [%2], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17"); } } } #endif // __ARM_NEON && __aarch64__ #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { float* output0_tm = top_blob_tm.channel(p); #if __aarch64__ const Mat kernel0_tm = kernel_tm.channel(p / 2 + p % 2); #else const Mat kernel0_tm = kernel_tm.channel(p); #endif for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // w0123_0 "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v12.4s, v4.4s, v1.s[0] \n" "fmla v13.4s, v4.4s, v1.s[1] \n" "fmla v14.4s, v4.4s, v1.s[2] \n" "fmla v15.4s, v4.4s, v1.s[3] \n" "fmla v16.4s, v4.4s, v2.s[0] \n" "fmla v17.4s, v4.4s, v2.s[1] \n" "fmla v18.4s, v4.4s, v2.s[2] \n" "fmla v19.4s, v4.4s, v2.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" "fmla v8.4s, v5.4s, v3.s[0] \n" "fmla v9.4s, v5.4s, v3.s[1] \n" "fmla v10.4s, v5.4s, v3.s[2] \n" "fmla v11.4s, v5.4s, v3.s[3] \n" "fmla v12.4s, v5.4s, v20.s[0] \n" "fmla v13.4s, v5.4s, v20.s[1] \n" "fmla v14.4s, v5.4s, v20.s[2] \n" "fmla v15.4s, v5.4s, v20.s[3] \n" "fmla v16.4s, v5.4s, v21.s[0] \n" "fmla v17.4s, v5.4s, v21.s[1] \n" "fmla v18.4s, v5.4s, v21.s[2] \n" "fmla v19.4s, v5.4s, v21.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "fmla v8.4s, v6.4s, v22.s[0] \n" "fmla v9.4s, v6.4s, v22.s[1] \n" "fmla v10.4s, v6.4s, v22.s[2] \n" "fmla v11.4s, v6.4s, v22.s[3] \n" "fmla v12.4s, v6.4s, v23.s[0] \n" "fmla v13.4s, v6.4s, v23.s[1] \n" "fmla v14.4s, v6.4s, v23.s[2] \n" "fmla v15.4s, v6.4s, v23.s[3] \n" "fmla v16.4s, v6.4s, v24.s[0] \n" "fmla v17.4s, v6.4s, v24.s[1] \n" "fmla v18.4s, v6.4s, v24.s[2] \n" "fmla v19.4s, v6.4s, v24.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v7.4s, v25.s[0] \n" "fmla v9.4s, v7.4s, v25.s[1] \n" "fmla v10.4s, v7.4s, v25.s[2] \n" "fmla v11.4s, v7.4s, v25.s[3] \n" "fmla v12.4s, v7.4s, v26.s[0] \n" "fmla v13.4s, 
v7.4s, v26.s[1] \n" "fmla v14.4s, v7.4s, v26.s[2] \n" "fmla v15.4s, v7.4s, v26.s[3] \n" "fmla v16.4s, v7.4s, v27.s[0] \n" "fmla v17.4s, v7.4s, v27.s[1] \n" "fmla v18.4s, v7.4s, v27.s[2] \n" "fmla v19.4s, v7.4s, v27.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n" "st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } #endif for (; i + 7 < tiles; i += 8) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8); #else const float* r0 = bb2.row(i / 8); #endif const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n" // r4 r5 r6 r7 "fmla v20.4s, v8.4s, v4.s[0] \n" "fmla v21.4s, v8.4s, v5.s[0] \n" "fmla v22.4s, v8.4s, v6.s[0] \n" "fmla v23.4s, v8.4s, v7.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v18.4s, v9.4s, v2.s[1] \n" "fmla v19.4s, v9.4s, v3.s[1] \n" "fmla v20.4s, v9.4s, v4.s[1] \n" "fmla v21.4s, v9.4s, v5.s[1] \n" "fmla v22.4s, v9.4s, v6.s[1] \n" "fmla v23.4s, v9.4s, v7.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "fmla v18.4s, v10.4s, v2.s[2] \n" "fmla v19.4s, v10.4s, v3.s[2] \n" "fmla v20.4s, v10.4s, v4.s[2] \n" "fmla v21.4s, v10.4s, v5.s[2] \n" "fmla v22.4s, v10.4s, v6.s[2] \n" "fmla v23.4s, v10.4s, v7.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "fmla v18.4s, v11.4s, v2.s[3] \n" "fmla v19.4s, v11.4s, v3.s[3] \n" "fmla v20.4s, v11.4s, v4.s[3] \n" "fmla v21.4s, v11.4s, v5.s[3] \n" "fmla v22.4s, v11.4s, v6.s[3] \n" "fmla v23.4s, v11.4s, v7.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); #else asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "veor q12, q12 \n" "veor q13, q13 \n" "veor q14, q14 \n" "veor q15, q15 \n" "0: \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "vmla.f32 q12, q4, d2[0] \n" "vmla.f32 q13, q4, d2[1] \n" "vmla.f32 q14, q4, d3[0] \n" "vmla.f32 q15, q4, d3[1] \n" "vmla.f32 q8, q5, d4[0] \n" "vmla.f32 
q9, q5, d4[1] \n" "vmla.f32 q10, q5, d5[0] \n" "vmla.f32 q11, q5, d5[1] \n" "vmla.f32 q12, q5, d6[0] \n" "vmla.f32 q13, q5, d6[1] \n" "vmla.f32 q14, q5, d7[0] \n" "vmla.f32 q15, q5, d7[1] \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "vmla.f32 q8, q6, d0[0] \n" "vmla.f32 q9, q6, d0[1] \n" "vmla.f32 q10, q6, d1[0] \n" "vmla.f32 q11, q6, d1[1] \n" "vmla.f32 q12, q6, d2[0] \n" "vmla.f32 q13, q6, d2[1] \n" "vmla.f32 q14, q6, d3[0] \n" "vmla.f32 q15, q6, d3[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d4[0] \n" "vmla.f32 q9, q7, d4[1] \n" "vmla.f32 q10, q7, d5[0] \n" "vmla.f32 q11, q7, d5[1] \n" "vmla.f32 q12, q7, d6[0] \n" "vmla.f32 q13, q7, d6[1] \n" "vmla.f32 q14, q7, d7[0] \n" "vmla.f32 q15, q7, d7[1] \n" "bne 0b \n" "vstm %1!, {d16-d23} \n" "vstm %1!, {d24-d31} \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif } for (; i + 3 < tiles; i += 4) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4); #endif const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v18.4s, v9.4s, v2.s[1] \n" "fmla v19.4s, v9.4s, v3.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "fmla v18.4s, v10.4s, v2.s[2] \n" "fmla v19.4s, v10.4s, v3.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "fmla v18.4s, v11.4s, v2.s[3] \n" "fmla v19.4s, v11.4s, v3.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19"); #else asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d2[0] \n" "vmla.f32 q10, q4, d4[0] \n" "vmla.f32 q11, q4, d6[0] \n" "vmla.f32 q8, q5, d0[1] \n" "vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q10, q5, d4[1] \n" "vmla.f32 q11, q5, d6[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q9, q6, d3[0] \n" "vmla.f32 q10, q6, d5[0] \n" "vmla.f32 q11, q6, d7[0] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d1[1] \n" "vmla.f32 q9, q7, d3[1] \n" "vmla.f32 q10, q7, d5[1] \n" "vmla.f32 q11, q7, d7[1] \n" "bne 0b \n" "vstm %1!, {d16-d23} \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"); #endif } for (; i + 1 < tiles; i += 2) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); #else const 
float* r0 = bb2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2); #endif const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4s, v1.4s}, [%2], #32 \n" // r0 r1 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v16", "v17"); #else asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "0: \n" "pld [%2, #256] \n" "vld1.f32 {d0-d3}, [%2 :128]! \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d2[0] \n" "vmla.f32 q8, q5, d0[1] \n" "vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q9, q6, d3[0] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d1[1] \n" "vmla.f32 q9, q7, d3[1] \n" "bne 0b \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9"); #endif } for (; i < tiles; i++) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #endif const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "0: \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4s}, [%2], #16 \n" // r0 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "bne 0b \n" "st1 {v16.4s}, [%1], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v16"); #else asm volatile( "veor q8, q8 \n" "0: \n" "pld [%2, #128] \n" "vld1.f32 {d0-d1}, [%2 :128]! \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q8, q5, d0[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q8, q7, d1[1] \n" "bne 0b \n" "vst1.f32 {d16-d17}, [%1 :128]! 
\n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8"); #endif } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator); } { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); // const float bias0 = bias ? bias[p] : 0.f; float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f); float tmp[6][8][4]; // tile for (int i = 0; i < outh / 6; i++) { for (int j = 0; j < outw / 6; j++) { // top_blob_tm.create(tiles, 64, outch, elemsize, elempack); const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm / 8 + j) * 4; const float* output0_tm_1 = output0_tm_0 + tiles * 4; const float* output0_tm_2 = output0_tm_0 + tiles * 8; const float* output0_tm_3 = output0_tm_0 + tiles * 12; const float* output0_tm_4 = output0_tm_0 + tiles * 16; const float* output0_tm_5 = output0_tm_0 + tiles * 20; const float* output0_tm_6 = output0_tm_0 + tiles * 24; const float* output0_tm_7 = output0_tm_0 + tiles * 28; float* output0 = out0.row(i * 6) + (j * 6) * 4; // TODO neon optimize for (int m = 0; m < 8; m++) { float32x4_t _out0tm0 = vld1q_f32(output0_tm_0); float32x4_t _out0tm1 = vld1q_f32(output0_tm_1); float32x4_t _out0tm2 = vld1q_f32(output0_tm_2); float32x4_t _out0tm3 = vld1q_f32(output0_tm_3); float32x4_t _out0tm4 = vld1q_f32(output0_tm_4); float32x4_t _out0tm5 = vld1q_f32(output0_tm_5); float32x4_t _out0tm6 = vld1q_f32(output0_tm_6); float32x4_t _out0tm7 = vld1q_f32(output0_tm_7); float32x4_t _tmp024a = vaddq_f32(_out0tm1, _out0tm2); float32x4_t _tmp135a = vsubq_f32(_out0tm1, _out0tm2); // float tmp024a = output0_tm[1] + output0_tm[2]; // float tmp135a = output0_tm[1] - output0_tm[2]; float32x4_t _tmp024b = vaddq_f32(_out0tm3, _out0tm4); float32x4_t _tmp135b = vsubq_f32(_out0tm3, _out0tm4); // float tmp024b = output0_tm[3] + output0_tm[4]; // float tmp135b = output0_tm[3] - output0_tm[4]; float32x4_t _tmp024c = vaddq_f32(_out0tm5, _out0tm6); float32x4_t _tmp135c = vsubq_f32(_out0tm5, _out0tm6); // float tmp024c = output0_tm[5] + output0_tm[6]; // float tmp135c = output0_tm[5] - output0_tm[6]; float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f)); float32x4_t _tmp2m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f); float32x4_t _tmp4m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f); vst1q_f32(tmp[0][m], _tmp0m); 
vst1q_f32(tmp[2][m], _tmp2m); vst1q_f32(tmp[4][m], _tmp4m); // tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32; // tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; // tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; float32x4_t _tmp1m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f); float32x4_t _tmp3m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f); float32x4_t _tmp5m = vaddq_f32(vaddq_f32(_out0tm7, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f)); vst1q_f32(tmp[1][m], _tmp1m); vst1q_f32(tmp[3][m], _tmp3m); vst1q_f32(tmp[5][m], _tmp5m); // tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; // tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; // tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c; output0_tm_0 += tiles * 32; output0_tm_1 += tiles * 32; output0_tm_2 += tiles * 32; output0_tm_3 += tiles * 32; output0_tm_4 += tiles * 32; output0_tm_5 += tiles * 32; output0_tm_6 += tiles * 32; output0_tm_7 += tiles * 32; } for (int m = 0; m < 6; m++) { float32x4_t _tmp00 = vld1q_f32(tmp[m][0]); float32x4_t _tmp01 = vld1q_f32(tmp[m][1]); float32x4_t _tmp02 = vld1q_f32(tmp[m][2]); float32x4_t _tmp03 = vld1q_f32(tmp[m][3]); float32x4_t _tmp04 = vld1q_f32(tmp[m][4]); float32x4_t _tmp05 = vld1q_f32(tmp[m][5]); float32x4_t _tmp06 = vld1q_f32(tmp[m][6]); float32x4_t _tmp07 = vld1q_f32(tmp[m][7]); float32x4_t _tmp024a = vaddq_f32(_tmp01, _tmp02); float32x4_t _tmp135a = vsubq_f32(_tmp01, _tmp02); // float tmp024a = tmp0[1] + tmp0[2]; // float tmp135a = tmp0[1] - tmp0[2]; float32x4_t _tmp024b = vaddq_f32(_tmp03, _tmp04); float32x4_t _tmp135b = vsubq_f32(_tmp03, _tmp04); // float tmp024b = tmp0[3] + tmp0[4]; // float tmp135b = tmp0[3] - tmp0[4]; float32x4_t _tmp024c = vaddq_f32(_tmp05, _tmp06); float32x4_t _tmp135c = vsubq_f32(_tmp05, _tmp06); // float tmp024c = tmp0[5] + tmp0[6]; // float tmp135c = tmp0[5] - tmp0[6]; float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f))); float32x4_t _out02 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f)); float32x4_t _out04 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f)); vst1q_f32(output0, _out00); vst1q_f32(output0 + 8, _out02); vst1q_f32(output0 + 16, _out04); // output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32; // output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8; // output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c; float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f)); float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f)); float32x4_t _out05 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp07, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f))); vst1q_f32(output0 + 4, _out01); vst1q_f32(output0 + 12, _out03); vst1q_f32(output0 + 20, _out05); // output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16; // output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4; // output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c; output0 += outw * 4; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); } static void conv3x3s2_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = 
top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = (w - 2 * outw + w) * 4; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out0 = top_blob.channel(p); float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f); out0.fill(_bias0); for (int q = 0; q < inch; q++) { float* outptr0 = out0.row(0); const Mat img0 = bottom_blob.channel(q); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); const float* kptr = (const float*)kernel.channel(p).row(q); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0] \n" // sum0 sum1 sum2 sum3 "prfm pldl1keep, [%1, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" // r00 r01 r02 r03 "prfm pldl1keep, [%1, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n" // r04 r05 r06 r07 "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v20.4s, v16.4s, v0.s[0] \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "fmla v23.4s, v16.4s, v6.s[0] \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "fmla v23.4s, v17.4s, v6.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n" "fmla v20.4s, v18.4s, v0.s[2] \n" "fmla v21.4s, v18.4s, v2.s[2] \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v6.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "fmla v22.4s, v19.4s, v4.s[3] \n" "fmla v23.4s, v19.4s, v6.s[3] \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v28.4s}, [%1] \n" // r08 "fmla v20.4s, v24.4s, v1.s[0] \n" "fmla v21.4s, v24.4s, v3.s[0] \n" "fmla v22.4s, v24.4s, v5.s[0] \n" "fmla v23.4s, v24.4s, v7.s[0] \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "fmla v22.4s, v25.4s, v5.s[1] \n" "fmla v23.4s, v25.4s, v7.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v20.4s, v26.4s, v1.s[2] \n" "fmla v21.4s, v26.4s, v3.s[2] \n" "fmla v22.4s, v26.4s, v5.s[2] \n" "fmla v23.4s, v26.4s, v7.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "fmla v22.4s, v27.4s, v5.s[3] \n" "fmla v23.4s, v27.4s, v7.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%2], #64 \n" // r10 r11 r12 r13 "fmla v20.4s, v16.4s, v2.s[0] \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "fmla v22.4s, v16.4s, v6.s[0] \n" "fmla v23.4s, v16.4s, v28.s[0] \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "fmla v22.4s, v17.4s, v6.s[1] \n" "fmla v23.4s, v17.4s, v28.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n" "fmla v20.4s, v18.4s, v2.s[2] \n" "fmla v21.4s, v18.4s, v4.s[2] \n" "fmla v22.4s, v18.4s, v6.s[2] \n" "fmla v23.4s, v18.4s, v28.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "fmla v22.4s, v19.4s, v6.s[3] \n" "fmla v23.4s, v19.4s, v28.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%2], #64 \n" // r14 r15 r16 r17 "fmla v20.4s, v24.4s, v8.s[0] \n" "fmla v21.4s, v24.4s, v10.s[0] \n" "fmla v22.4s, v24.4s, v12.s[0] \n" "fmla v23.4s, v24.4s, v14.s[0] \n" "fmla v20.4s, v25.4s, v8.s[1] \n" "fmla v21.4s, v25.4s, v10.s[1] \n" "fmla v22.4s, v25.4s, v12.s[1] \n" "fmla v23.4s, v25.4s, v14.s[1] \n" "prfm 
pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v20.4s, v26.4s, v8.s[2] \n" "fmla v21.4s, v26.4s, v10.s[2] \n" "fmla v22.4s, v26.4s, v12.s[2] \n" "fmla v23.4s, v26.4s, v14.s[2] \n" "fmla v20.4s, v27.4s, v8.s[3] \n" "fmla v21.4s, v27.4s, v10.s[3] \n" "fmla v22.4s, v27.4s, v12.s[3] \n" "fmla v23.4s, v27.4s, v14.s[3] \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v28.4s}, [%2] \n" // r18 "fmla v20.4s, v16.4s, v9.s[0] \n" "fmla v21.4s, v16.4s, v11.s[0] \n" "fmla v22.4s, v16.4s, v13.s[0] \n" "fmla v23.4s, v16.4s, v15.s[0] \n" "fmla v20.4s, v17.4s, v9.s[1] \n" "fmla v21.4s, v17.4s, v11.s[1] \n" "fmla v22.4s, v17.4s, v13.s[1] \n" "fmla v23.4s, v17.4s, v15.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n" "fmla v20.4s, v18.4s, v9.s[2] \n" "fmla v21.4s, v18.4s, v11.s[2] \n" "fmla v22.4s, v18.4s, v13.s[2] \n" "fmla v23.4s, v18.4s, v15.s[2] \n" "fmla v20.4s, v19.4s, v9.s[3] \n" "fmla v21.4s, v19.4s, v11.s[3] \n" "fmla v22.4s, v19.4s, v13.s[3] \n" "fmla v23.4s, v19.4s, v15.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r20 r21 r22 r23 "fmla v20.4s, v24.4s, v10.s[0] \n" "fmla v21.4s, v24.4s, v12.s[0] \n" "fmla v22.4s, v24.4s, v14.s[0] \n" "fmla v23.4s, v24.4s, v28.s[0] \n" "fmla v20.4s, v25.4s, v10.s[1] \n" "fmla v21.4s, v25.4s, v12.s[1] \n" "fmla v22.4s, v25.4s, v14.s[1] \n" "fmla v23.4s, v25.4s, v28.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v20.4s, v26.4s, v10.s[2] \n" "fmla v21.4s, v26.4s, v12.s[2] \n" "fmla v22.4s, v26.4s, v14.s[2] \n" "fmla v23.4s, v26.4s, v28.s[2] \n" "fmla v20.4s, v27.4s, v10.s[3] \n" "fmla v21.4s, v27.4s, v12.s[3] \n" "fmla v22.4s, v27.4s, v14.s[3] \n" "fmla v23.4s, v27.4s, v28.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // r24 r25 r26 r27 "fmla v20.4s, v16.4s, v0.s[0] \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "fmla v23.4s, v16.4s, v6.s[0] \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "fmla v23.4s, v17.4s, v6.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n" "fmla v20.4s, v18.4s, v0.s[2] \n" "fmla v21.4s, v18.4s, v2.s[2] \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v6.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "fmla v22.4s, v19.4s, v4.s[3] \n" "fmla v23.4s, v19.4s, v6.s[3] \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v28.4s}, [%3] \n" // r28 "fmla v20.4s, v24.4s, v1.s[0] \n" "fmla v21.4s, v24.4s, v3.s[0] \n" "fmla v22.4s, v24.4s, v5.s[0] \n" "fmla v23.4s, v24.4s, v7.s[0] \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "fmla v22.4s, v25.4s, v5.s[1] \n" "fmla v23.4s, v25.4s, v7.s[1] \n" // "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4] \n" "fmla v20.4s, v26.4s, v1.s[2] \n" "fmla v21.4s, v26.4s, v3.s[2] \n" "fmla v22.4s, v26.4s, v5.s[2] \n" "fmla v23.4s, v26.4s, v7.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "fmla v22.4s, v27.4s, v5.s[3] \n" "fmla v23.4s, v27.4s, v7.s[3] \n" "fmla v20.4s, v16.4s, v2.s[0] \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "fmla v22.4s, v16.4s, v6.s[0] \n" "fmla v23.4s, v16.4s, v28.s[0] \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "fmla v22.4s, v17.4s, v6.s[1] \n" "fmla v23.4s, v17.4s, v28.s[1] \n" "fmla v20.4s, v18.4s, v2.s[2] \n" "fmla v21.4s, v18.4s, 
v4.s[2] \n" "fmla v22.4s, v18.4s, v6.s[2] \n" "fmla v23.4s, v18.4s, v28.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "fmla v22.4s, v19.4s, v6.s[3] \n" "fmla v23.4s, v19.4s, v28.s[3] \n" "sub %4, %4, #512 \n" // kptr -= 8 * 16; "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(kptr) // %4 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(kptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28"); #else // __aarch64__ asm volatile( "pld [%0, #512] \n" "vldm %0, {d24-d31} \n" // sum0 sum1 sum2 sum3 "pld [%1, #512] \n" "vldm %1!, {d0-d7} \n" // r00 r01 r02 r03 "pld [%1, #512] \n" "vldm %1!, {d8-d15} \n" // r04 r05 r06 r07 "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q9, d12[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "vmla.f32 q14, q11, d9[1] \n" "vmla.f32 q15, q11, d13[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "pld [%1, #128] \n" "vld1.f32 {d0-d1}, [%1 :128] \n" // r08 "vmla.f32 q12, q8, d2[0] \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q8, d10[0] \n" "vmla.f32 q15, q8, d14[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q9, d10[1] \n" "vmla.f32 q15, q9, d14[1] \n" "vmla.f32 q12, q10, d3[0] \n" "vmla.f32 q13, q10, d7[0] \n" "vmla.f32 q14, q10, d11[0] \n" "vmla.f32 q15, q10, d15[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d7[1] \n" "vmla.f32 q14, q11, d11[1] \n" "vmla.f32 q15, q11, d15[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q8, d12[0] \n" "vmla.f32 q15, q8, d0[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q9, d12[1] \n" "vmla.f32 q15, q9, d0[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d9[0] \n" "vmla.f32 q14, q10, d13[0] \n" "vmla.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vmla.f32 q14, q11, d13[1] \n" "vmla.f32 q15, q11, d1[1] \n" "pld [%2, #512] \n" "vldm %2!, {d8-d15} \n" // r10 r11 r12 r13 "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" // r14 r15 r16 r17 "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q12, q8, d8[0] \n" "vmla.f32 q13, q8, d12[0] \n" "vmla.f32 q14, q8, d0[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d12[1] \n" "vmla.f32 q14, q9, d0[1] \n" "vmla.f32 q15, q9, d4[1] \n" "vmla.f32 q12, q10, d9[0] \n" "vmla.f32 q13, q10, d13[0] \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d13[1] \n" "vmla.f32 q14, q11, d1[1] \n" "vmla.f32 q15, q11, d5[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "pld [%2, #128] \n" "vld1.f32 {d8-d9}, [%2 :128] \n" // r18 "vmla.f32 q12, q8, d10[0] \n" "vmla.f32 q13, q8, d14[0] \n" "vmla.f32 q14, q8, d2[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d10[1] \n" "vmla.f32 q13, q9, d14[1] \n" "vmla.f32 q14, q9, d2[1] \n" "vmla.f32 q15, q9, d6[1] \n" "vmla.f32 q12, q10, d11[0] \n" "vmla.f32 q13, q10, 
d15[0] \n" "vmla.f32 q14, q10, d3[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d11[1] \n" "vmla.f32 q13, q11, d15[1] \n" "vmla.f32 q14, q11, d3[1] \n" "vmla.f32 q15, q11, d7[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q12, q8, d12[0] \n" "vmla.f32 q13, q8, d0[0] \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vmla.f32 q12, q9, d12[1] \n" "vmla.f32 q13, q9, d0[1] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q9, d8[1] \n" "vmla.f32 q12, q10, d13[0] \n" "vmla.f32 q13, q10, d1[0] \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d13[1] \n" "vmla.f32 q13, q11, d1[1] \n" "vmla.f32 q14, q11, d5[1] \n" "vmla.f32 q15, q11, d9[1] \n" "pld [%3, #512] \n" "vldm %3!, {d0-d7} \n" // r20 r21 r22 r23 "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" // r24 r25 r26 r27 "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q9, d12[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "vmla.f32 q14, q11, d9[1] \n" "vmla.f32 q15, q11, d13[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "pld [%3, #128] \n" "vld1.f32 {d0-d1}, [%3 :128] \n" // r28 "vmla.f32 q12, q8, d2[0] \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q8, d10[0] \n" "vmla.f32 q15, q8, d14[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q9, d10[1] \n" "vmla.f32 q15, q9, d14[1] \n" "vmla.f32 q12, q10, d3[0] \n" "vmla.f32 q13, q10, d7[0] \n" "vmla.f32 q14, q10, d11[0] \n" "vmla.f32 q15, q10, d15[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d7[1] \n" "vmla.f32 q14, q11, d11[1] \n" "vmla.f32 q15, q11, d15[1] \n" // "pld [%4, #512] \n" "vldm %4, {d16-d23} \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q8, d12[0] \n" "vmla.f32 q15, q8, d0[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q9, d12[1] \n" "vmla.f32 q15, q9, d0[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d9[0] \n" "vmla.f32 q14, q10, d13[0] \n" "vmla.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vmla.f32 q14, q11, d13[1] \n" "vmla.f32 q15, q11, d1[1] \n" "sub %4, %4, #512 \n" // kptr -= 8 * 16; "vstm %0!, {d24-d31} \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(kptr) // %4 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; j + 1 < outw; j += 2) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v20.4s, v21.4s}, [%0] \n" // sum0 sum1 "prfm pldl1keep, [%1, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" // r00 r01 r02 r03 "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmul v22.4s, v16.4s, v0.s[0] \n" "fmul v23.4s, v16.4s, v2.s[0] \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n" "fmla v22.4s, v18.4s, v0.s[2] \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v4.4s}, [%1] \n" // 
r04 "fmla v22.4s, v24.4s, v1.s[0] \n" "fmla v23.4s, v24.4s, v3.s[0] \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v22.4s, v26.4s, v1.s[2] \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "fmla v22.4s, v16.4s, v2.s[0] \n" "fmla v23.4s, v16.4s, v4.s[0] \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n" "fmla v22.4s, v18.4s, v2.s[2] \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r10 r11 r12 r13 "fmla v22.4s, v24.4s, v0.s[0] \n" "fmla v23.4s, v24.4s, v2.s[0] \n" "fmla v20.4s, v25.4s, v0.s[1] \n" "fmla v21.4s, v25.4s, v2.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v22.4s, v26.4s, v0.s[2] \n" "fmla v23.4s, v26.4s, v2.s[2] \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "fmla v21.4s, v27.4s, v2.s[3] \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v4.4s}, [%2] \n" // r14 "fmla v22.4s, v16.4s, v1.s[0] \n" "fmla v23.4s, v16.4s, v3.s[0] \n" "fmla v20.4s, v17.4s, v1.s[1] \n" "fmla v21.4s, v17.4s, v3.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n" "fmla v22.4s, v18.4s, v1.s[2] \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "fmla v21.4s, v19.4s, v3.s[3] \n" "fmla v22.4s, v24.4s, v2.s[0] \n" "fmla v23.4s, v24.4s, v4.s[0] \n" "fmla v20.4s, v25.4s, v2.s[1] \n" "fmla v21.4s, v25.4s, v4.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v22.4s, v26.4s, v2.s[2] \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "fmla v20.4s, v27.4s, v2.s[3] \n" "fmla v21.4s, v27.4s, v4.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r20 r21 r22 r23 "fmla v22.4s, v16.4s, v0.s[0] \n" "fmla v23.4s, v16.4s, v2.s[0] \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n" "fmla v22.4s, v18.4s, v0.s[2] \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v4.4s}, [%3] \n" // r24 "fmla v22.4s, v24.4s, v1.s[0] \n" "fmla v23.4s, v24.4s, v3.s[0] \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" // "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4] \n" "fmla v22.4s, v26.4s, v1.s[2] \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "fmla v22.4s, v16.4s, v2.s[0] \n" "fmla v23.4s, v16.4s, v4.s[0] \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "fmla v22.4s, v18.4s, v2.s[2] \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "fadd v20.4s, v20.4s, v22.4s \n" "fadd v21.4s, v21.4s, v23.4s \n" "sub %4, %4, #512 \n" // kptr -= 8 * 16; "st1 {v20.4s, v21.4s}, [%0], #32 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(kptr) // %4 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(kptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); #else // __aarch64__ asm 
volatile( "pld [%0, #256] \n" "vld1.f32 {d24-d27}, [%0 :128] \n" // sum0 sum1 "pld [%1, #512] \n" "vldm %1!, {d0-d7} \n" // r00 r01 r02 r03 "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmul.f32 q14, q8, d0[0] \n" "vmul.f32 q15, q8, d4[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "pld [%1, #128] \n" "vld1.f32 {d8-d9}, [%1 :128] \n" // r04 "vmla.f32 q14, q8, d2[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q10, d3[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d7[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" // r10 r11 r12 r13 "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q14, q8, d0[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "pld [%2, #128] \n" "vld1.f32 {d8-d9}, [%2 :128] \n" // r14 "vmla.f32 q14, q8, d2[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q10, d3[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d7[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "pld [%3, #512] \n" "vldm %3!, {d0-d7} \n" // r20 r21 r22 r23 "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q14, q8, d0[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "pld [%3, #128] \n" "vld1.f32 {d8-d9}, [%3 :128] \n" // r24 "vmla.f32 q14, q8, d2[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q10, d3[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d7[1] \n" // "pld [%4, #512] \n" "vldm %4, {d16-d23} \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vadd.f32 q12, q12, q14 \n" "vadd.f32 q13, q13, q15 \n" "sub %4, %4, #512 \n" // kptr -= 8 * 16; "vst1.f32 {d24-d27}, [%0 :128]! 
\n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(kptr) // %4 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; j < outw; j++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v20.4s}, [%0] \n" // sum0 "prfm pldl1keep, [%1, #384] \n" "ld1 {v0.4s, v1.4s, v2.4s}, [%1] \n" // r00 r01 r02 "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmul v21.4s, v16.4s, v0.s[0] \n" "fmul v22.4s, v17.4s, v0.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n" "fmul v23.4s, v18.4s, v0.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v24.4s, v1.s[0] \n" "fmla v22.4s, v25.4s, v1.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v23.4s, v26.4s, v1.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "prfm pldl1keep, [%2, #384] \n" "ld1 {v3.4s, v4.4s, v5.4s}, [%2] \n" // r10 r11 r12 "fmla v21.4s, v16.4s, v2.s[0] \n" "fmla v22.4s, v17.4s, v2.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v24.4s, v3.s[0] \n" "fmla v22.4s, v25.4s, v3.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "prfm pldl1keep, [%3, #384] \n" "ld1 {v0.4s, v1.4s, v2.4s}, [%3] \n" // r20 r21 r22 "fmla v21.4s, v24.4s, v5.s[0] \n" "fmla v22.4s, v25.4s, v5.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v23.4s, v26.4s, v5.s[2] \n" "fmla v20.4s, v27.4s, v5.s[3] \n" "fmla v21.4s, v16.4s, v0.s[0] \n" "fmla v22.4s, v17.4s, v0.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n" "fmla v23.4s, v18.4s, v0.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v24.4s, v1.s[0] \n" "fmla v22.4s, v25.4s, v1.s[1] \n" // "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4] \n" "fmla v23.4s, v26.4s, v1.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "fmla v22.4s, v17.4s, v2.s[1] \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "add %1, %1, #32 \n" "fadd v22.4s, v21.4s, v22.4s \n" "add %2, %2, #32 \n" "fadd v23.4s, v23.4s, v22.4s \n" "add %3, %3, #32 \n" "fadd v20.4s, v20.4s, v23.4s \n" "sub %4, %4, #512 \n" // kptr -= 8 * 16; "st1 {v20.4s}, [%0], #16 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(kptr) // %4 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(kptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); #else // __aarch64__ asm volatile( "pld [%0, #128] \n" "vld1.f32 {d24-d25}, [%0 :128] \n" // sum0 "pld [%1, #384] \n" "vldm %1, {d0-d5} \n" // r00 r01 r02 "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmul.f32 q13, q8, d0[0] \n" "vmul.f32 q14, q9, d0[1] \n" "vmul.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d1[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q13, q8, d2[0] \n" "vmla.f32 q14, 
q9, d2[1] \n" "vmla.f32 q15, q10, d3[0] \n" "vmla.f32 q12, q11, d3[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d5[1] \n" "pld [%2, #384] \n" "vldm %2, {d0-d5} \n" // r10 r11 r12 "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q13, q8, d0[0] \n" "vmla.f32 q14, q9, d0[1] \n" "vmla.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d1[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q13, q8, d2[0] \n" "vmla.f32 q14, q9, d2[1] \n" "vmla.f32 q15, q10, d3[0] \n" "vmla.f32 q12, q11, d3[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d5[1] \n" "pld [%3, #384] \n" "vldm %3, {d0-d5} \n" // r20 r21 r22 "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q13, q8, d0[0] \n" "vmla.f32 q14, q9, d0[1] \n" "vmla.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d1[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q13, q8, d2[0] \n" "vmla.f32 q14, q9, d2[1] \n" "vmla.f32 q15, q10, d3[0] \n" "vmla.f32 q12, q11, d3[1] \n" // "pld [%4, #512] \n" "vldm %4, {d16-d23} \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vadd.f32 q14, q14, q13 \n" "add %1, %1, #32 \n" "vadd.f32 q15, q15, q14 \n" "add %2, %2, #32 \n" "vadd.f32 q12, q12, q15 \n" "add %3, %3, #32 \n" "sub %4, %4, #512 \n" // kptr -= 8 * 16; "vst1.f32 {d24-d25}, [%0 :128]! \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(kptr) // %4 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } } }
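// ---------------------------------------------------------------------------
// Editor's note (added): each inline-assembly tile loop above computes the
// same packed dot product, only with a different number of tiles in flight.
// A minimal scalar reference is sketched below, assuming the pack-4 layouts
// implied by the loads (per input channel: ntiles packed elements of 4 floats
// from r0, and 4 weight vectors of 4 floats from k0); the function name and
// signature are illustrative, not part of the original source.
static void tile_dot_pack4_ref(float* out, const float* r0, const float* k0,
                               int inch, int ntiles)
{
    // out: ntiles accumulators of 4 floats each, zero-initialized
    // (the "eor vN.16b, vN.16b, vN.16b" instructions)
    for (int t = 0; t < ntiles * 4; t++)
        out[t] = 0.f;

    for (int q = 0; q < inch; q++)
    {
        for (int t = 0; t < ntiles; t++)
        {
            for (int lane = 0; lane < 4; lane++)
            {
                // the scalar broadcast, e.g. "fmla v16.4s, v8.4s, v0.s[0]"
                float a = r0[(q * ntiles + t) * 4 + lane];
                for (int c = 0; c < 4; c++)
                    out[t * 4 + c] += k0[(q * 4 + lane) * 4 + c] * a;
            }
        }
    }
}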
3d7pt.c
/*
 * Order-1, 3D 7 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"

#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 24;
  tile_size[3] = 128;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;

  // initialize variables
  // srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
              + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1]
                      + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
          }
        }
      }
    }
#pragma endscop

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (causing performance degradation, so left disabled)
  /*
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  */
  return 0;
}
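/* Editor's note (added): the scop above is deliberately serial (it is the
 * input to a source-to-source tiler such as PLUTO). For reference, a minimal
 * OpenMP parallelization of a single time step under the same
 * A[2][Nz][Ny][Nx] double-buffered layout could look like the sketch below;
 * the function name and signature are illustrative, not part of the benchmark.
 */
static void sweep_3d7pt_omp(double ****A, int t, int Nz, int Ny, int Nx,
                            double alpha, double beta)
{
#pragma omp parallel for
  for (int i = 1; i < Nz-1; i++) {
    for (int j = 1; j < Ny-1; j++) {
      for (int k = 1; k < Nx-1; k++) {
        // same update as the scop: read plane t%2, write plane (t+1)%2
        A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
          + beta * (A[t%2][i-1][j][k] + A[t%2][i][j-1][k] + A[t%2][i][j][k-1]
                  + A[t%2][i+1][j][k] + A[t%2][i][j+1][k] + A[t%2][i][j][k+1]);
      }
    }
  }
}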
GB_unop__identity_fc32_int64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_fc32_int64)
// op(A') function:  GB (_unop_tran__identity_fc32_int64)

// C type:   GxB_FC32_t
// A type:   int64_t
// cast:     GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0)
// unaryop:  cij = aij

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    /* aij = Ax [pA] */                             \
    int64_t aij = Ax [pA] ;                         \
    /* Cx [pC] = op (cast (aij)) */                 \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;  \
    Cx [pC] = z ;                                   \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_fc32_int64)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const int64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (int64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int64_t aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int64_t aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_fc32_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
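//------------------------------------------------------------------------------
// Editor's note (added): stripped of the GraphBLAS plumbing, the apply kernel
// above is a parallel typecast from int64_t to single-precision complex. A
// self-contained equivalent is sketched below, using C99 "float complex" in
// place of GxB_FC32_t; the function name is illustrative, not a library API.
//------------------------------------------------------------------------------

#include <complex.h>

static void cast_int64_to_fc32_sketch   // hypothetical helper, not part of GraphBLAS
(
    float complex *Cx,
    const int64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // assigning a real value to a complex lvalue sets the imaginary
        // part to zero, matching GxB_CMPLXF ((float) aij, 0)
        Cx [p] = (float) Ax [p] ;
    }
}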
GiRaFFE_boundary_conditions.h
// Currently, we're using basic Cartesian boundary conditions, pending fixes by Zach.

// Part P8a: Declare boundary condition FACE_UPDATE macro,
//           which updates a single face of the 3D grid cube
//           using polynomial extrapolation. (The active stencil below is
//           linear, built from the two nearest interior points; the
//           third-point term that would make it quadratic is commented out.)

// Basic extrapolation boundary conditions
#define FACE_UPDATE(which_gf, i0min,i0max, i1min,i1max, i2min,i2max, FACEX0,FACEX1,FACEX2) \
  for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) { \
        gfs[IDX4S(which_gf,i0,i1,i2)] =                                  \
          +2.0*gfs[IDX4S(which_gf,i0+1*FACEX0,i1+1*FACEX1,i2+1*FACEX2)]  \
          -1.0*gfs[IDX4S(which_gf,i0+2*FACEX0,i1+2*FACEX1,i2+2*FACEX2)]; \
      }
//        +1.0*gfs[IDX4S(which_gf,i0+3*FACEX0,i1+3*FACEX1,i2+3*FACEX2)]; \

// Basic Copy boundary conditions
#define FACE_UPDATE_COPY(which_gf, i0min,i0max, i1min,i1max, i2min,i2max, FACEX0,FACEX1,FACEX2) \
  for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) { \
        gfs[IDX4S(which_gf,i0,i1,i2)] = gfs[IDX4S(which_gf,i0+1*FACEX0,i1+1*FACEX1,i2+1*FACEX2)]; \
      }

// Part P8b: Boundary condition driver routine: Apply BCs to all six
//           boundary faces of the cube, filling in the innermost
//           ghost zone first, and moving outward.
const int MAXFACE = -1;
const int NUL     = +0;
const int MINFACE = +1;

// This macro was written to act on an entire 3-vector of gfs at once
// (which_gf_0 being the zeroth component of that vector), zeroing any
// component whose velocity is directed inwards on the face in consideration.
// That 3-vector logic is commented out below; as currently written, the macro
// extrapolates a single gridfunction, exactly like FACE_UPDATE.
#define FACE_UPDATE_OUTFLOW(which_gf, i0min,i0max, i1min,i1max, i2min,i2max, FACEX0,FACEX1,FACEX2) \
  for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) { \
        aux_gfs[IDX4S(which_gf,i0,i1,i2)] =                                  \
          +2.0*aux_gfs[IDX4S(which_gf,i0+1*FACEX0,i1+1*FACEX1,i2+1*FACEX2)]  \
          -1.0*aux_gfs[IDX4S(which_gf,i0+2*FACEX0,i1+2*FACEX1,i2+2*FACEX2)]; \
      }
/*      aux_gfs[IDX4S(which_gf_0+1,i0,i1,i2)] =                                 \
          +3.0*aux_gfs[IDX4S(which_gf_0+1,i0+1*FACEX0,i1+1*FACEX1,i2+1*FACEX2)] \
          -3.0*aux_gfs[IDX4S(which_gf_0+1,i0+2*FACEX0,i1+2*FACEX1,i2+2*FACEX2)] \
          +1.0*aux_gfs[IDX4S(which_gf_0+1,i0+3*FACEX0,i1+3*FACEX1,i2+3*FACEX2)]; \
        aux_gfs[IDX4S(which_gf_0+2,i0,i1,i2)] =                                 \
          +3.0*aux_gfs[IDX4S(which_gf_0+2,i0+1*FACEX0,i1+1*FACEX1,i2+1*FACEX2)] \
          -3.0*aux_gfs[IDX4S(which_gf_0+2,i0+2*FACEX0,i1+2*FACEX1,i2+2*FACEX2)] \
          +1.0*aux_gfs[IDX4S(which_gf_0+2,i0+3*FACEX0,i1+3*FACEX1,i2+3*FACEX2)]; \
        if(FACEX0*aux_gfs[IDX4S(which_gf_0+0,i0,i1,i2)] > 0.0) { \
          aux_gfs[IDX4S(which_gf_0+0,i0,i1,i2)] = 0.0;           \
        }                                                        \
        if(FACEX1*aux_gfs[IDX4S(which_gf_0+1,i0,i1,i2)] > 0.0) { \
          aux_gfs[IDX4S(which_gf_0+1,i0,i1,i2)] = 0.0;           \
        }                                                        \
        if(FACEX2*aux_gfs[IDX4S(which_gf_0+2,i0,i1,i2)] > 0.0) { \
          aux_gfs[IDX4S(which_gf_0+2,i0,i1,i2)] = 0.0;           \
        }                                                        \
*/

void apply_bcs(const paramstruct *restrict params,REAL *gfs,REAL *aux_gfs) {
#include "../set_Cparameters.h"
    // First, we apply extrapolation boundary conditions to AD
#pragma omp parallel for
    for(int which_gf=0;which_gf<NUM_EVOL_GFS;which_gf++) {
        if(which_gf < STILDED0GF || which_gf > STILDED2GF) {
            int imin[3] = { NGHOSTS, NGHOSTS, NGHOSTS };
            int imax[3] = { Nxx_plus_2NGHOSTS0-NGHOSTS, Nxx_plus_2NGHOSTS1-NGHOSTS, Nxx_plus_2NGHOSTS2-NGHOSTS };
            for(int which_gz = 0; which_gz < NGHOSTS; which_gz++) {
                // After updating each face, adjust imin[] and imax[]
                //   to reflect the newly-updated face extents.
                FACE_UPDATE(which_gf, imin[0]-1,imin[0], imin[1],imax[1], imin[2],imax[2], MINFACE,NUL,NUL); imin[0]--;
                FACE_UPDATE(which_gf, imax[0],imax[0]+1, imin[1],imax[1], imin[2],imax[2], MAXFACE,NUL,NUL); imax[0]++;
                FACE_UPDATE(which_gf, imin[0],imax[0], imin[1]-1,imin[1], imin[2],imax[2], NUL,MINFACE,NUL); imin[1]--;
                FACE_UPDATE(which_gf, imin[0],imax[0], imax[1],imax[1]+1, imin[2],imax[2], NUL,MAXFACE,NUL); imax[1]++;
                FACE_UPDATE(which_gf, imin[0],imax[0], imin[1],imax[1], imin[2]-1,imin[2], NUL,NUL,MINFACE); imin[2]--;
                FACE_UPDATE(which_gf, imin[0],imax[0], imin[1],imax[1], imax[2],imax[2]+1, NUL,NUL,MAXFACE); imax[2]++;
            }
        }
    }
    // Apply outflow/extrapolation boundary conditions to ValenciavU by passing VALENCIAVU0 as which_gf_0
    for(int which_gf=VALENCIAVU0GF;which_gf<=VALENCIAVU2GF;which_gf++) {
        int imin[3] = { NGHOSTS, NGHOSTS, NGHOSTS };
        int imax[3] = { Nxx_plus_2NGHOSTS0-NGHOSTS, Nxx_plus_2NGHOSTS1-NGHOSTS, Nxx_plus_2NGHOSTS2-NGHOSTS };
        for(int which_gz = 0; which_gz < NGHOSTS; which_gz++) {
            FACE_UPDATE_OUTFLOW(which_gf, imin[0]-1,imin[0], imin[1],imax[1], imin[2],imax[2], MINFACE,NUL,NUL); imin[0]--;
            FACE_UPDATE_OUTFLOW(which_gf, imax[0],imax[0]+1, imin[1],imax[1], imin[2],imax[2], MAXFACE,NUL,NUL); imax[0]++;
            FACE_UPDATE_OUTFLOW(which_gf, imin[0],imax[0], imin[1]-1,imin[1], imin[2],imax[2], NUL,MINFACE,NUL); imin[1]--;
            FACE_UPDATE_OUTFLOW(which_gf, imin[0],imax[0], imax[1],imax[1]+1, imin[2],imax[2], NUL,MAXFACE,NUL); imax[1]++;
            FACE_UPDATE_OUTFLOW(which_gf, imin[0],imax[0], imin[1],imax[1], imin[2]-1,imin[2], NUL,NUL,MINFACE); imin[2]--;
            FACE_UPDATE_OUTFLOW(which_gf, imin[0],imax[0], imin[1],imax[1], imax[2],imax[2]+1, NUL,NUL,MAXFACE); imax[2]++;
        }
    }
    // Then, we apply copy boundary conditions to StildeD and psi6Phi
    /*#pragma omp parallel for
    for(int which_gf=3;which_gf<NUM_EVOL_GFS;which_gf++) {
        int imin[3] = { NGHOSTS, NGHOSTS, NGHOSTS };
        int imax[3] = { Nxx_plus_2NGHOSTS0-NGHOSTS, Nxx_plus_2NGHOSTS1-NGHOSTS, Nxx_plus_2NGHOSTS2-NGHOSTS };
        for(int which_gz = 0; which_gz < NGHOSTS; which_gz++) {
            // After updating each face, adjust imin[] and imax[]
            //   to reflect the newly-updated face extents.
            FACE_UPDATE_COPY(which_gf, imin[0]-1,imin[0], imin[1],imax[1], imin[2],imax[2], MINFACE,NUL,NUL); imin[0]--;
            FACE_UPDATE_COPY(which_gf, imax[0],imax[0]+1, imin[1],imax[1], imin[2],imax[2], MAXFACE,NUL,NUL); imax[0]++;
            FACE_UPDATE_COPY(which_gf, imin[0],imax[0], imin[1]-1,imin[1], imin[2],imax[2], NUL,MINFACE,NUL); imin[1]--;
            FACE_UPDATE_COPY(which_gf, imin[0],imax[0], imax[1],imax[1]+1, imin[2],imax[2], NUL,MAXFACE,NUL); imax[1]++;
            FACE_UPDATE_COPY(which_gf, imin[0],imax[0], imin[1],imax[1], imin[2]-1,imin[2], NUL,NUL,MINFACE); imin[2]--;
            FACE_UPDATE_COPY(which_gf, imin[0],imax[0], imin[1],imax[1], imax[2],imax[2]+1, NUL,NUL,MAXFACE); imax[2]++;
        }
    }*/
}

// A supplement to the boundary conditions for debugging. This will overwrite data with exact conditions
void FACE_UPDATE_EXACT(const paramstruct *restrict params,REAL *restrict xx[3],
                       const int n, const REAL dt,REAL *out_gfs,REAL *aux_gfs,
                       const int i0min,const int i0max,
                       const int i1min,const int i1max,
                       const int i2min,const int i2max,
                       const int FACEX0,const int FACEX1,const int FACEX2) {
#include "../set_Cparameters.h"
    for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) {
        REAL xx0 = xx[0][i0]-n*dt;
        REAL xx1 = xx[1][i1];
        REAL xx2 = xx[2][i2];
        if(xx0<=lbound) {
#include "../GiRaFFEfood_A_v_1D_tests_left.h"
        } else if (xx0<rbound) {
#include "../GiRaFFEfood_A_v_1D_tests_center.h"
        } else {
#include "../GiRaFFEfood_A_v_1D_tests_right.h"
        }
        out_gfs[IDX4S(PSI6PHIGF, i0,i1,i2)] = 0.0;
    }
}
void apply_bcs_EXACT(const paramstruct *restrict params,REAL *restrict xx[3],
                     const int n, const REAL dt,
                     REAL *out_gfs,REAL *aux_gfs) {
#include "../set_Cparameters.h"
    int imin[3] = { NGHOSTS, NGHOSTS, NGHOSTS };
    int imax[3] = { Nxx_plus_2NGHOSTS0-NGHOSTS, Nxx_plus_2NGHOSTS1-NGHOSTS, Nxx_plus_2NGHOSTS2-NGHOSTS };
    for(int which_gz = 0; which_gz < NGHOSTS; which_gz++) {
        // After updating each face, adjust imin[] and imax[]
        //   to reflect the newly-updated face extents.
        // For these 1D tests, the xmin and xmax faces are the ones that matter.
        FACE_UPDATE_EXACT(params,xx,n,dt,out_gfs,aux_gfs,imin[0]-1,imin[0], imin[1],imax[1], imin[2],imax[2], MINFACE,NUL,NUL); imin[0]--;
        FACE_UPDATE_EXACT(params,xx,n,dt,out_gfs,aux_gfs,imax[0],imax[0]+1, imin[1],imax[1], imin[2],imax[2], MAXFACE,NUL,NUL); imax[0]++;
        FACE_UPDATE_EXACT(params,xx,n,dt,out_gfs,aux_gfs,imin[0],imax[0], imin[1]-1,imin[1], imin[2],imax[2], NUL,MINFACE,NUL); imin[1]--;
        FACE_UPDATE_EXACT(params,xx,n,dt,out_gfs,aux_gfs,imin[0],imax[0], imax[1],imax[1]+1, imin[2],imax[2], NUL,MAXFACE,NUL); imax[1]++;
        FACE_UPDATE_EXACT(params,xx,n,dt,out_gfs,aux_gfs,imin[0],imax[0], imin[1],imax[1], imin[2]-1,imin[2], NUL,NUL,MINFACE); imin[2]--;
        FACE_UPDATE_EXACT(params,xx,n,dt,out_gfs,aux_gfs,imin[0],imax[0], imin[1],imax[1], imax[2],imax[2]+1, NUL,NUL,MAXFACE); imax[2]++;
    }
}

// A supplement to the boundary conditions for debugging. This will overwrite data with exact conditions
void FACE_UPDATE_EXACT_StildeD(const paramstruct *restrict params,REAL *restrict xx[3],
                               REAL *out_gfs,REAL *out_gfs_exact,
                               const int i0min,const int i0max,
                               const int i1min,const int i1max,
                               const int i2min,const int i2max,
                               const int FACEX0,const int FACEX1,const int FACEX2) {
#include "../set_Cparameters.h"
    // This is currently modified to calculate more exact boundary conditions for StildeD. Rename if it works.
    /*for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) {
#include "../GiRaFFEfood_NRPy_Stilde.h"
    }*/
    /*idx = IDX3(i0,i1,i2);
    out_gfs[IDX4ptS(STILDED0GF,idx)] = out_gfs_exact[IDX4ptS(STILDED0GF,idx)];
    out_gfs[IDX4ptS(STILDED1GF,idx)] = out_gfs_exact[IDX4ptS(STILDED1GF,idx)];
    out_gfs[IDX4ptS(STILDED2GF,idx)] = out_gfs_exact[IDX4ptS(STILDED2GF,idx)];*/
}
void apply_bcs_EXACT_StildeD(const paramstruct *restrict params,REAL *restrict xx[3],
                             REAL *out_gfs,REAL *out_gfs_exact) {
#include "../set_Cparameters.h"
    int imin[3] = { NGHOSTS, NGHOSTS, NGHOSTS };
    int imax[3] = { Nxx_plus_2NGHOSTS0-NGHOSTS, Nxx_plus_2NGHOSTS1-NGHOSTS, Nxx_plus_2NGHOSTS2-NGHOSTS };
    for(int which_gz = 0; which_gz < NGHOSTS; which_gz++) {
        // After updating each face, adjust imin[] and imax[]
        //   to reflect the newly-updated face extents.
        // Right now, we only want to update the xmin and xmax faces with the exact data.
        FACE_UPDATE_EXACT_StildeD(params,xx,out_gfs,out_gfs_exact,imin[0]-1,imin[0], imin[1],imax[1], imin[2],imax[2], MINFACE,NUL,NUL); imin[0]--;
        FACE_UPDATE_EXACT_StildeD(params,xx,out_gfs,out_gfs_exact,imax[0],imax[0]+1, imin[1],imax[1], imin[2],imax[2], MAXFACE,NUL,NUL); imax[0]++;
        //FACE_UPDATE_EXACT_StildeD(params,xx,out_gfs,out_gfs_exact,imin[0],imax[0], imin[1]-1,imin[1], imin[2],imax[2], NUL,MINFACE,NUL); imin[1]--;
        //FACE_UPDATE_EXACT_StildeD(params,xx,out_gfs,out_gfs_exact,imin[0],imax[0], imax[1],imax[1]+1, imin[2],imax[2], NUL,MAXFACE,NUL); imax[1]++;
        //FACE_UPDATE_EXACT_StildeD(params,xx,out_gfs,out_gfs_exact,imin[0],imax[0], imin[1],imax[1], imin[2]-1,imin[2], NUL,NUL,MINFACE); imin[2]--;
        //FACE_UPDATE_EXACT_StildeD(params,xx,out_gfs,out_gfs_exact,imin[0],imax[0], imin[1],imax[1], imax[2],imax[2]+1, NUL,NUL,MAXFACE); imax[2]++;
    }
}
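// Editor's note (added): the active FACE_UPDATE stencil above is *linear*
// extrapolation from the two nearest interior points (the quadratic term is
// commented out). In 1D, with interior samples f1 at x=1 and f2 at x=2, the
// ghost value g at x=0 reduces to the sketch below (illustrative only, not
// part of this header's interface):
static inline REAL extrapolate_ghost_linear(const REAL f1, const REAL f2) {
    // Line through (1,f1) and (2,f2), evaluated at x=0: g = 2*f1 - f2.
    // A quadratic fit through a third point f3 would give g = 3*f1 - 3*f2 + f3,
    // matching the +3,-3,+1 coefficients in the commented-out outflow update.
    return 2.0*f1 - 1.0*f2;
}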
3d25pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 4; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
      for (t1=-1;t1<=floord(Nt-1,2);t1++) {
        lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4));
        ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(max(1,ceild(16*t2-Nz+9,4)),2*t1+1),4*t1-4*t2+2);t3<=min(min(min(floord(4*Nt+Ny-9,4),floord(8*t1+Ny+7,4)),floord(16*t2+Ny+3,4)),floord(16*t1-16*t2+Nz+Ny+5,4));t3++) {
            for (t4=max(max(max(0,ceild(t1-31,32)),ceild(16*t2-Nz-243,256)),ceild(4*t3-Ny-243,256));t4<=min(min(min(min(floord(4*Nt+Nx-9,256),floord(8*t1+Nx+7,256)),floord(16*t2+Nx+3,256)),floord(4*t3+Nx-9,256)),floord(16*t1-16*t2+Nz+Nx+5,256));t4++) {
              for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(4*t3-Ny+5,4)),ceild(256*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),Nt-1),2*t1+3),4*t2+2),t3-1),64*t4+62);t5++) {
                for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) {
                  for (t7=4*t3;t7<=min(4*t3+3,4*t5+Ny-5);t7++) {
                    lbv=max(256*t4,4*t5+4);
                    ubv=min(256*t4+255,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][
(-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
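/* For reference: an untiled sketch of the update that the CLooG-generated
 * tiled loops above compute. Hypothetical standalone function over the same
 * A[2][Nz][Ny][Nx] and coef[13][Nz][Ny][Nx] layout; radius-4 halo, so the
 * interior is [4, N-5] in each dimension, matching the tiled loop bounds. */
static void stencil_3d25pt_ref(int Nt, int Nz, int Ny, int Nx,
                               double ****A, double ****coef)
{
  for (int t = 0; t < Nt; t++)
    for (int i = 4; i < Nz - 4; i++)
      for (int j = 4; j < Ny - 4; j++)
        for (int k = 4; k < Nx - 4; k++) {
          double s = coef[0][i][j][k] * A[t % 2][i][j][k];
          /* coef[3r-2], coef[3r-1] and coef[3r] weight the +/-r neighbor
           * pairs along z, y and x respectively, for radius r = 1..4 */
          for (int r = 1; r <= 4; r++) {
            s += coef[3*r - 2][i][j][k] * (A[t % 2][i - r][j][k] + A[t % 2][i + r][j][k]);
            s += coef[3*r - 1][i][j][k] * (A[t % 2][i][j - r][k] + A[t % 2][i][j + r][k]);
            s += coef[3*r    ][i][j][k] * (A[t % 2][i][j][k - r] + A[t % 2][i][j][k + r]);
          }
          A[(t + 1) % 2][i][j][k] = s;
        }
}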
convolution_7x7.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #if __ARM_NEON #include <arm_neon.h> #endif // __ARM_NEON static void conv7x7s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for for (int p=0; p<outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? bias[p] : 0.f; out.fill(bias0); for (int q=0; q<inch; q++) { float* outptr = out; float* outptr2 = out + outw; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch*49 + q*49; const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w*2; const float* r3 = img0 + w*3; const float* r4 = img0 + w*4; const float* r5 = img0 + w*5; const float* r6 = img0 + w*6; const float* r7 = img0 + w*7; const float* k0 = kernel0; const float* k1 = kernel0 + 7; const float* k2 = kernel0 + 14; const float* k3 = kernel0 + 21; const float* k4 = kernel0 + 28; const float* k5 = kernel0 + 35; const float* k6 = kernel0 + 42; int i = 0; for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 2; int remain = outw - (nn << 2); #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _sum = vld1q_f32(outptr); float32x4_t _k0123 = vld1q_f32(k0); float32x4_t _k4567 = vld1q_f32(k0 + 4); float32x4_t _r00 = vld1q_f32(r0);// 0 1 2 3 float32x4_t _r04 = vld1q_f32(r0 + 4);// 4 5 6 7 float32x4_t _r00n = vld1q_f32(r0 + 8);// 8 9 10 11 float32x4_t _r01 = vextq_f32(_r00, _r04, 1);// 1 2 3 4 float32x4_t _r02 = vextq_f32(_r00, _r04, 2);// 2 3 4 5 float32x4_t _r03 = vextq_f32(_r00, _r04, 3);// 3 4 5 6 float32x4_t _r05 = vextq_f32(_r04, _r00n, 1);// 5 6 7 8 float32x4_t _r06 = vextq_f32(_r04, _r00n, 2);// 6 7 8 9 _sum = vfmaq_laneq_f32(_sum, _r00, _k0123, 0); _sum = vfmaq_laneq_f32(_sum, _r01, _k0123, 1); _sum = vfmaq_laneq_f32(_sum, _r02, _k0123, 2); _sum = vfmaq_laneq_f32(_sum, _r03, _k0123, 3); _sum = vfmaq_laneq_f32(_sum, _r04, _k4567, 0); _sum = vfmaq_laneq_f32(_sum, _r05, _k4567, 1); _sum = vfmaq_laneq_f32(_sum, _r06, _k4567, 2); float32x4_t _k78910 = vld1q_f32(k1); float32x4_t _k11121314 = vld1q_f32(k1 + 4); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r14 = vld1q_f32(r1 + 4); float32x4_t _r10n = vld1q_f32(r1 + 8); float32x4_t _r11 = vextq_f32(_r10, _r14, 1); float32x4_t _r12 = vextq_f32(_r10, _r14, 2); float32x4_t _r13 = vextq_f32(_r10, _r14, 3); float32x4_t _r15 = vextq_f32(_r14, _r10n, 1); float32x4_t _r16 = vextq_f32(_r14, _r10n, 2); _sum = vfmaq_laneq_f32(_sum, _r10, _k78910, 0); _sum = vfmaq_laneq_f32(_sum, _r11, _k78910, 1); _sum = vfmaq_laneq_f32(_sum, _r12, _k78910, 2); _sum = vfmaq_laneq_f32(_sum, _r13, _k78910, 3); _sum = vfmaq_laneq_f32(_sum, _r14, 
_k11121314, 0); _sum = vfmaq_laneq_f32(_sum, _r15, _k11121314, 1); _sum = vfmaq_laneq_f32(_sum, _r16, _k11121314, 2); float32x4_t _k14151617 = vld1q_f32(k2); float32x4_t _k18192021 = vld1q_f32(k2 + 4); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r24 = vld1q_f32(r2 + 4); float32x4_t _r20n = vld1q_f32(r2 + 8); float32x4_t _r21 = vextq_f32(_r20, _r24, 1); float32x4_t _r22 = vextq_f32(_r20, _r24, 2); float32x4_t _r23 = vextq_f32(_r20, _r24, 3); float32x4_t _r25 = vextq_f32(_r24, _r20n, 1); float32x4_t _r26 = vextq_f32(_r24, _r20n, 2); _sum = vfmaq_laneq_f32(_sum, _r20, _k14151617, 0); _sum = vfmaq_laneq_f32(_sum, _r21, _k14151617, 1); _sum = vfmaq_laneq_f32(_sum, _r22, _k14151617, 2); _sum = vfmaq_laneq_f32(_sum, _r23, _k14151617, 3); _sum = vfmaq_laneq_f32(_sum, _r24, _k18192021, 0); _sum = vfmaq_laneq_f32(_sum, _r25, _k18192021, 1); _sum = vfmaq_laneq_f32(_sum, _r26, _k18192021, 2); float32x4_t _k21222324 = vld1q_f32(k3); float32x4_t _k25262728 = vld1q_f32(k3 + 4); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _r34 = vld1q_f32(r3 + 4); float32x4_t _r30n = vld1q_f32(r3 + 8); float32x4_t _r31 = vextq_f32(_r30, _r34, 1); float32x4_t _r32 = vextq_f32(_r30, _r34, 2); float32x4_t _r33 = vextq_f32(_r30, _r34, 3); float32x4_t _r35 = vextq_f32(_r34, _r30n, 1); float32x4_t _r36 = vextq_f32(_r34, _r30n, 2); _sum = vfmaq_laneq_f32(_sum, _r30, _k21222324, 0); _sum = vfmaq_laneq_f32(_sum, _r31, _k21222324, 1); _sum = vfmaq_laneq_f32(_sum, _r32, _k21222324, 2); _sum = vfmaq_laneq_f32(_sum, _r33, _k21222324, 3); _sum = vfmaq_laneq_f32(_sum, _r34, _k25262728, 0); _sum = vfmaq_laneq_f32(_sum, _r35, _k25262728, 1); _sum = vfmaq_laneq_f32(_sum, _r36, _k25262728, 2); float32x4_t _k28293031 = vld1q_f32(k4); float32x4_t _k32333435 = vld1q_f32(k4 + 4); float32x4_t _r40 = vld1q_f32(r4); float32x4_t _r44 = vld1q_f32(r4 + 4); float32x4_t _r40n = vld1q_f32(r4 + 8); float32x4_t _r41 = vextq_f32(_r40, _r44, 1); float32x4_t _r42 = vextq_f32(_r40, _r44, 2); float32x4_t _r43 = vextq_f32(_r40, _r44, 3); float32x4_t _r45 = vextq_f32(_r44, _r40n, 1); float32x4_t _r46 = vextq_f32(_r44, _r40n, 2); _sum = vfmaq_laneq_f32(_sum, _r40, _k28293031, 0); _sum = vfmaq_laneq_f32(_sum, _r41, _k28293031, 1); _sum = vfmaq_laneq_f32(_sum, _r42, _k28293031, 2); _sum = vfmaq_laneq_f32(_sum, _r43, _k28293031, 3); _sum = vfmaq_laneq_f32(_sum, _r44, _k32333435, 0); _sum = vfmaq_laneq_f32(_sum, _r45, _k32333435, 1); _sum = vfmaq_laneq_f32(_sum, _r46, _k32333435, 2); float32x4_t _k35363738 = vld1q_f32(k5); float32x4_t _k39404142 = vld1q_f32(k5 + 4); float32x4_t _r50 = vld1q_f32(r5); float32x4_t _r54 = vld1q_f32(r5 + 4); float32x4_t _r50n = vld1q_f32(r5 + 8); float32x4_t _r51 = vextq_f32(_r50, _r54, 1); float32x4_t _r52 = vextq_f32(_r50, _r54, 2); float32x4_t _r53 = vextq_f32(_r50, _r54, 3); float32x4_t _r55 = vextq_f32(_r54, _r50n, 1); float32x4_t _r56 = vextq_f32(_r54, _r50n, 2); _sum = vfmaq_laneq_f32(_sum, _r50, _k35363738, 0); _sum = vfmaq_laneq_f32(_sum, _r51, _k35363738, 1); _sum = vfmaq_laneq_f32(_sum, _r52, _k35363738, 2); _sum = vfmaq_laneq_f32(_sum, _r53, _k35363738, 3); _sum = vfmaq_laneq_f32(_sum, _r54, _k39404142, 0); _sum = vfmaq_laneq_f32(_sum, _r55, _k39404142, 1); _sum = vfmaq_laneq_f32(_sum, _r56, _k39404142, 2); float32x4_t _k42434445 = vld1q_f32(k6); float32x4_t _k46474849 = vld1q_f32(k6 + 4); float32x4_t _r60 = vld1q_f32(r6); float32x4_t _r64 = vld1q_f32(r6 + 4); float32x4_t _r60n = vld1q_f32(r6 + 8); float32x4_t _r61 = vextq_f32(_r60, _r64, 1); float32x4_t _r62 = vextq_f32(_r60, _r64, 2); float32x4_t _r63 = vextq_f32(_r60, 
_r64, 3); float32x4_t _r65 = vextq_f32(_r64, _r60n, 1); float32x4_t _r66 = vextq_f32(_r64, _r60n, 2); _sum = vfmaq_laneq_f32(_sum, _r60, _k42434445, 0); _sum = vfmaq_laneq_f32(_sum, _r61, _k42434445, 1); _sum = vfmaq_laneq_f32(_sum, _r62, _k42434445, 2); _sum = vfmaq_laneq_f32(_sum, _r63, _k42434445, 3); _sum = vfmaq_laneq_f32(_sum, _r64, _k46474849, 0); _sum = vfmaq_laneq_f32(_sum, _r65, _k46474849, 1); _sum = vfmaq_laneq_f32(_sum, _r66, _k46474849, 2); vst1q_f32(outptr, _sum); r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; r5 += 4; r6 += 4; outptr += 4; } #else if (nn > 0) { asm volatile( "0: \n" "pld [%1, #256] \n" "vld1.f32 {d24-d25}, [%1] \n"// _sum "veor q13, q13 \n"// _sum2 = 0; "veor q14, q14 \n"// _sum3 = 0; "veor q15, q15 \n"// _sum4 = 0; "pld [%9, #256] \n" "vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k0123 k4567 "add %9, #28 \n" "pld [%2, #128] \n" "vld1.f32 {d0-d1}, [%2]! \n"// q0 = 0 1 2 3 "vmla.f32 q12, q0, d8[0] \n" "pld [%2, #256] \n" "vld1.f32 {d4-d7}, [%2] \n"// q2 = 4 5 6 7 q3 = 8 9 10 11 "vmla.f32 q13, q2, d10[0] \n" "vext.32 q1, q0, q2, #1 \n"// q1 = 1 2 3 4 "vext.32 q10, q2, q3, #1 \n"// q10= 5 6 7 8 "vmla.f32 q14, q1, d8[1] \n" "vmla.f32 q15, q10, d10[1] \n" "vext.32 q8, q0, q2, #2 \n"// q8 = 2 3 4 5 "vext.32 q11, q2, q3, #2 \n"// q11= 6 7 8 9 "vmla.f32 q12, q8, d9[0] \n" "vmla.f32 q13, q11, d11[0] \n" "vext.32 q9, q0, q2, #3 \n"// q9 = 3 4 5 6 "vmla.f32 q14, q9, d9[1] \n" "pld [%9, #256] \n" "vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k78910 k11121314 "add %9, #28 \n" "pld [%3, #128] \n" "vld1.f32 {d0-d1}, [%3]! \n" "vmla.f32 q15, q0, d12[0] \n" "pld [%3, #256] \n" "vld1.f32 {d4-d7}, [%3] \n" "vmla.f32 q12, q2, d14[0] \n" "vext.32 q1, q0, q2, #1 \n" "vext.32 q10, q2, q3, #1 \n" "vmla.f32 q13, q1, d12[1] \n" "vmla.f32 q14, q10, d14[1] \n" "vext.32 q8, q0, q2, #2 \n" "vext.32 q11, q2, q3, #2 \n" "vmla.f32 q15, q8, d13[0] \n" "vmla.f32 q12, q11, d15[0] \n" "vext.32 q9, q0, q2, #3 \n" "vmla.f32 q13, q9, d13[1] \n" "pld [%9, #256] \n" "vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k14151617 k18192021 "add %9, #28 \n" "pld [%4, #128] \n" "vld1.f32 {d0-d1}, [%4]! \n" "vmla.f32 q14, q0, d8[0] \n" "pld [%4, #256] \n" "vld1.f32 {d4-d7}, [%4] \n" "vmla.f32 q15, q2, d10[0] \n" "vext.32 q1, q0, q2, #1 \n" "vext.32 q10, q2, q3, #1 \n" "vmla.f32 q12, q1, d8[1] \n" "vmla.f32 q13, q10, d10[1] \n" "vext.32 q8, q0, q2, #2 \n" "vext.32 q11, q2, q3, #2 \n" "vmla.f32 q14, q8, d9[0] \n" "vmla.f32 q15, q11, d11[0] \n" "vext.32 q9, q0, q2, #3 \n" "vmla.f32 q12, q9, d9[1] \n" "pld [%9, #256] \n" "vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k21222324 k25262728 "add %9, #28 \n" "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5]! \n" "vmla.f32 q13, q0, d12[0] \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5] \n" "vmla.f32 q14, q2, d14[0] \n" "vext.32 q1, q0, q2, #1 \n" "vext.32 q10, q2, q3, #1 \n" "vmla.f32 q15, q1, d12[1] \n" "vmla.f32 q12, q10, d14[1] \n" "vext.32 q8, q0, q2, #2 \n" "vext.32 q11, q2, q3, #2 \n" "vmla.f32 q13, q8, d13[0] \n" "vmla.f32 q14, q11, d15[0] \n" "vext.32 q9, q0, q2, #3 \n" "vmla.f32 q15, q9, d13[1] \n" "pld [%9, #256] \n" "vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k28293031 k32333435 "add %9, #28 \n" "pld [%6, #128] \n" "vld1.f32 {d0-d1}, [%6]! 
\n" "vmla.f32 q12, q0, d8[0] \n" "pld [%6, #256] \n" "vld1.f32 {d4-d7}, [%6] \n" "vmla.f32 q13, q2, d10[0] \n" "vext.32 q1, q0, q2, #1 \n" "vext.32 q10, q2, q3, #1 \n" "vmla.f32 q14, q1, d8[1] \n" "vmla.f32 q15, q10, d10[1] \n" "vext.32 q8, q0, q2, #2 \n" "vext.32 q11, q2, q3, #2 \n" "vmla.f32 q12, q8, d9[0] \n" "vmla.f32 q13, q11, d11[0] \n" "vext.32 q9, q0, q2, #3 \n" "vmla.f32 q14, q9, d9[1] \n" "pld [%9, #256] \n" "vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k35363738 k39404142 "add %9, #28 \n" "pld [%7, #128] \n" "vld1.f32 {d0-d1}, [%7]! \n" "vmla.f32 q15, q0, d12[0] \n" "pld [%7, #256] \n" "vld1.f32 {d4-d7}, [%7] \n" "vmla.f32 q12, q2, d14[0] \n" "vext.32 q1, q0, q2, #1 \n" "vext.32 q10, q2, q3, #1 \n" "vmla.f32 q13, q1, d12[1] \n" "vmla.f32 q14, q10, d14[1] \n" "vext.32 q8, q0, q2, #2 \n" "vext.32 q11, q2, q3, #2 \n" "vmla.f32 q15, q8, d13[0] \n" "vmla.f32 q12, q11, d15[0] \n" "vext.32 q9, q0, q2, #3 \n" "vmla.f32 q13, q9, d13[1] \n" "pld [%9, #256] \n" "vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k42434445 k46474849 "sub %9, #168 \n"// restore k0 "pld [%8, #128] \n" "vld1.f32 {d0-d1}, [%8]! \n" "vmla.f32 q14, q0, d8[0] \n" "pld [%8, #256] \n" "vld1.f32 {d4-d7}, [%8] \n" "vmla.f32 q15, q2, d10[0] \n" "vext.32 q1, q0, q2, #1 \n" "vext.32 q10, q2, q3, #1 \n" "vmla.f32 q12, q1, d8[1] \n" "vmla.f32 q13, q10, d10[1] \n" "vext.32 q8, q0, q2, #2 \n" "vext.32 q11, q2, q3, #2 \n" "vmla.f32 q14, q8, d9[0] \n" "vmla.f32 q15, q11, d11[0] \n" "vext.32 q9, q0, q2, #3 \n" "vmla.f32 q12, q9, d9[1] \n" "vadd.f32 q13, q13, q14 \n" "vadd.f32 q13, q13, q15 \n" "vadd.f32 q12, q12, q13 \n" "vst1.f32 {d24-d25}, [%1]! \n" "subs %0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(r5), // %7 "=r"(r6), // %8 "=r"(k0) // %9 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(r5), "8"(r6), "9"(k0) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { float sum = 0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r0[3] * k0[3]; sum += r0[4] * k0[4]; sum += r0[5] * k0[5]; sum += r0[6] * k0[6]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r1[3] * k1[3]; sum += r1[4] * k1[4]; sum += r1[5] * k1[5]; sum += r1[6] * k1[6]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum += r2[3] * k2[3]; sum += r2[4] * k2[4]; sum += r2[5] * k2[5]; sum += r2[6] * k2[6]; sum += r3[0] * k3[0]; sum += r3[1] * k3[1]; sum += r3[2] * k3[2]; sum += r3[3] * k3[3]; sum += r3[4] * k3[4]; sum += r3[5] * k3[5]; sum += r3[6] * k3[6]; sum += r4[0] * k4[0]; sum += r4[1] * k4[1]; sum += r4[2] * k4[2]; sum += r4[3] * k4[3]; sum += r4[4] * k4[4]; sum += r4[5] * k4[5]; sum += r4[6] * k4[6]; sum += r5[0] * k5[0]; sum += r5[1] * k5[1]; sum += r5[2] * k5[2]; sum += r5[3] * k5[3]; sum += r5[4] * k5[4]; sum += r5[5] * k5[5]; sum += r5[6] * k5[6]; sum += r6[0] * k6[0]; sum += r6[1] * k6[1]; sum += r6[2] * k6[2]; sum += r6[3] * k6[3]; sum += r6[4] * k6[4]; sum += r6[5] * k6[5]; sum += r6[6] * k6[6]; *outptr += sum; r0++; r1++; r2++; r3++; r4++; r5++; r6++; outptr++; } r0 += 6; r1 += 6; r2 += 6; r3 += 6; r4 += 6; r5 += 6; r6 += 6; } } } } static void conv7x7s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = 
top_blob.h; int outch = top_blob.c; const int tailstep = w - 2*outw + w; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for for (int p=0; p<outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? bias[p] : 0.f; out.fill(bias0); for (int q=0; q<inch; q++) { float* outptr = out; float* outptr2 = out + outw; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch*49 + q*49; const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w*2; const float* r3 = img0 + w*3; const float* r4 = img0 + w*4; const float* r5 = img0 + w*5; const float* r6 = img0 + w*6; const float* k0 = kernel0; const float* k1 = kernel0 + 7; const float* k2 = kernel0 + 14; const float* k3 = kernel0 + 21; const float* k4 = kernel0 + 28; const float* k5 = kernel0 + 35; const float* k6 = kernel0 + 42; int i = 0; for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 2; int remain = outw - (nn << 2); #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _sum = vld1q_f32(outptr); float32x4_t _k0123 = vld1q_f32(k0); float32x4_t _k4567 = vld1q_f32(k0 + 4); float32x4x2_t _r00_02461357 = vld2q_f32(r0); float32x4x2_t _r00nx2 = vld2q_f32(r0 + 8); float32x4_t _r0_8101214 = _r00nx2.val[0];// 8 10 12 14 float32x4_t _r0_9111315 = _r00nx2.val[1];// 9 11 13 15 float32x4_t _r00 = _r00_02461357.val[0];// 0 2 4 6 float32x4_t _r01 = _r00_02461357.val[1];// 1 3 5 7 float32x4_t _r02 = vextq_f32(_r00, _r0_8101214, 1);// 2 4 6 8 float32x4_t _r03 = vextq_f32(_r01, _r0_9111315, 1);// 3 5 7 9 float32x4_t _r04 = vextq_f32(_r00, _r0_8101214, 2);// 4 6 8 10 float32x4_t _r05 = vextq_f32(_r01, _r0_9111315, 2);// 5 7 9 11 float32x4_t _r06 = vextq_f32(_r00, _r0_8101214, 3);// 6 8 10 12 _sum = vfmaq_laneq_f32(_sum, _r00, _k0123, 0); _sum = vfmaq_laneq_f32(_sum, _r01, _k0123, 1); _sum = vfmaq_laneq_f32(_sum, _r02, _k0123, 2); _sum = vfmaq_laneq_f32(_sum, _r03, _k0123, 3); _sum = vfmaq_laneq_f32(_sum, _r04, _k4567, 0); _sum = vfmaq_laneq_f32(_sum, _r05, _k4567, 1); _sum = vfmaq_laneq_f32(_sum, _r06, _k4567, 2); float32x4_t _k78910 = vld1q_f32(k1); float32x4_t _k11121314 = vld1q_f32(k1 + 4); float32x4x2_t _r10_02461357 = vld2q_f32(r1); float32x4x2_t _r10nx2 = vld2q_f32(r1 + 8); float32x4_t _r1_8101214 = _r10nx2.val[0]; float32x4_t _r1_9111315 = _r10nx2.val[1]; float32x4_t _r10 = _r10_02461357.val[0]; float32x4_t _r11 = _r10_02461357.val[1]; float32x4_t _r12 = vextq_f32(_r10, _r1_8101214, 1); float32x4_t _r13 = vextq_f32(_r11, _r1_9111315, 1); float32x4_t _r14 = vextq_f32(_r10, _r1_8101214, 2); float32x4_t _r15 = vextq_f32(_r11, _r1_9111315, 2); float32x4_t _r16 = vextq_f32(_r10, _r1_8101214, 3); _sum = vfmaq_laneq_f32(_sum, _r10, _k78910, 0); _sum = vfmaq_laneq_f32(_sum, _r11, _k78910, 1); _sum = vfmaq_laneq_f32(_sum, _r12, _k78910, 2); _sum = vfmaq_laneq_f32(_sum, _r13, _k78910, 3); _sum = vfmaq_laneq_f32(_sum, _r14, _k11121314, 0); _sum = vfmaq_laneq_f32(_sum, _r15, _k11121314, 1); _sum = vfmaq_laneq_f32(_sum, _r16, _k11121314, 2); float32x4_t _k14151617 = vld1q_f32(k2); float32x4_t _k18192021 = vld1q_f32(k2 + 4); float32x4x2_t _r20_02461357 = vld2q_f32(r2); float32x4x2_t _r20nx2 = vld2q_f32(r2 + 8); float32x4_t _r2_8101214 = _r20nx2.val[0]; float32x4_t _r2_9111315 = _r20nx2.val[1]; float32x4_t _r20 = _r20_02461357.val[0]; float32x4_t _r21 = _r20_02461357.val[1]; float32x4_t _r22 = vextq_f32(_r20, _r2_8101214, 1); float32x4_t _r23 = vextq_f32(_r21, _r2_9111315, 1); float32x4_t _r24 = vextq_f32(_r20, _r2_8101214, 
2); float32x4_t _r25 = vextq_f32(_r21, _r2_9111315, 2); float32x4_t _r26 = vextq_f32(_r20, _r2_8101214, 3); _sum = vfmaq_laneq_f32(_sum, _r20, _k14151617, 0); _sum = vfmaq_laneq_f32(_sum, _r21, _k14151617, 1); _sum = vfmaq_laneq_f32(_sum, _r22, _k14151617, 2); _sum = vfmaq_laneq_f32(_sum, _r23, _k14151617, 3); _sum = vfmaq_laneq_f32(_sum, _r24, _k18192021, 0); _sum = vfmaq_laneq_f32(_sum, _r25, _k18192021, 1); _sum = vfmaq_laneq_f32(_sum, _r26, _k18192021, 2); float32x4_t _k21222324 = vld1q_f32(k3); float32x4_t _k25262728 = vld1q_f32(k3 + 4); float32x4x2_t _r30_02461357 = vld2q_f32(r3); float32x4x2_t _r30nx2 = vld2q_f32(r3 + 8); float32x4_t _r3_8101214 = _r30nx2.val[0]; float32x4_t _r3_9111315 = _r30nx2.val[1]; float32x4_t _r30 = _r30_02461357.val[0]; float32x4_t _r31 = _r30_02461357.val[1]; float32x4_t _r32 = vextq_f32(_r30, _r3_8101214, 1); float32x4_t _r33 = vextq_f32(_r31, _r3_9111315, 1); float32x4_t _r34 = vextq_f32(_r30, _r3_8101214, 2); float32x4_t _r35 = vextq_f32(_r31, _r3_9111315, 2); float32x4_t _r36 = vextq_f32(_r30, _r3_8101214, 3); _sum = vfmaq_laneq_f32(_sum, _r30, _k21222324, 0); _sum = vfmaq_laneq_f32(_sum, _r31, _k21222324, 1); _sum = vfmaq_laneq_f32(_sum, _r32, _k21222324, 2); _sum = vfmaq_laneq_f32(_sum, _r33, _k21222324, 3); _sum = vfmaq_laneq_f32(_sum, _r34, _k25262728, 0); _sum = vfmaq_laneq_f32(_sum, _r35, _k25262728, 1); _sum = vfmaq_laneq_f32(_sum, _r36, _k25262728, 2); float32x4_t _k28293031 = vld1q_f32(k4); float32x4_t _k32333435 = vld1q_f32(k4 + 4); float32x4x2_t _r40_02461357 = vld2q_f32(r4); float32x4x2_t _r40nx2 = vld2q_f32(r4 + 8); float32x4_t _r4_8101214 = _r40nx2.val[0]; float32x4_t _r4_9111315 = _r40nx2.val[1]; float32x4_t _r40 = _r40_02461357.val[0]; float32x4_t _r41 = _r40_02461357.val[1]; float32x4_t _r42 = vextq_f32(_r40, _r4_8101214, 1); float32x4_t _r43 = vextq_f32(_r41, _r4_9111315, 1); float32x4_t _r44 = vextq_f32(_r40, _r4_8101214, 2); float32x4_t _r45 = vextq_f32(_r41, _r4_9111315, 2); float32x4_t _r46 = vextq_f32(_r40, _r4_8101214, 3); _sum = vfmaq_laneq_f32(_sum, _r40, _k28293031, 0); _sum = vfmaq_laneq_f32(_sum, _r41, _k28293031, 1); _sum = vfmaq_laneq_f32(_sum, _r42, _k28293031, 2); _sum = vfmaq_laneq_f32(_sum, _r43, _k28293031, 3); _sum = vfmaq_laneq_f32(_sum, _r44, _k32333435, 0); _sum = vfmaq_laneq_f32(_sum, _r45, _k32333435, 1); _sum = vfmaq_laneq_f32(_sum, _r46, _k32333435, 2); float32x4_t _k35363738 = vld1q_f32(k5); float32x4_t _k39404142 = vld1q_f32(k5 + 4); float32x4x2_t _r50_02461357 = vld2q_f32(r5); float32x4x2_t _r50nx2 = vld2q_f32(r5 + 8); float32x4_t _r5_8101214 = _r50nx2.val[0]; float32x4_t _r5_9111315 = _r50nx2.val[1]; float32x4_t _r50 = _r50_02461357.val[0]; float32x4_t _r51 = _r50_02461357.val[1]; float32x4_t _r52 = vextq_f32(_r50, _r5_8101214, 1); float32x4_t _r53 = vextq_f32(_r51, _r5_9111315, 1); float32x4_t _r54 = vextq_f32(_r50, _r5_8101214, 2); float32x4_t _r55 = vextq_f32(_r51, _r5_9111315, 2); float32x4_t _r56 = vextq_f32(_r50, _r5_8101214, 3); _sum = vfmaq_laneq_f32(_sum, _r50, _k35363738, 0); _sum = vfmaq_laneq_f32(_sum, _r51, _k35363738, 1); _sum = vfmaq_laneq_f32(_sum, _r52, _k35363738, 2); _sum = vfmaq_laneq_f32(_sum, _r53, _k35363738, 3); _sum = vfmaq_laneq_f32(_sum, _r54, _k39404142, 0); _sum = vfmaq_laneq_f32(_sum, _r55, _k39404142, 1); _sum = vfmaq_laneq_f32(_sum, _r56, _k39404142, 2); float32x4_t _k42434445 = vld1q_f32(k6); float32x4_t _k46474849 = vld1q_f32(k6 + 4); float32x4x2_t _r60_02461357 = vld2q_f32(r6); float32x4x2_t _r60nx2 = vld2q_f32(r6 + 8); float32x4_t _r6_8101214 = _r60nx2.val[0]; 
float32x4_t _r6_9111315 = _r60nx2.val[1]; float32x4_t _r60 = _r60_02461357.val[0]; float32x4_t _r61 = _r60_02461357.val[1]; float32x4_t _r62 = vextq_f32(_r60, _r6_8101214, 1); float32x4_t _r63 = vextq_f32(_r61, _r6_9111315, 1); float32x4_t _r64 = vextq_f32(_r60, _r6_8101214, 2); float32x4_t _r65 = vextq_f32(_r61, _r6_9111315, 2); float32x4_t _r66 = vextq_f32(_r60, _r6_8101214, 3); _sum = vfmaq_laneq_f32(_sum, _r60, _k42434445, 0); _sum = vfmaq_laneq_f32(_sum, _r61, _k42434445, 1); _sum = vfmaq_laneq_f32(_sum, _r62, _k42434445, 2); _sum = vfmaq_laneq_f32(_sum, _r63, _k42434445, 3); _sum = vfmaq_laneq_f32(_sum, _r64, _k46474849, 0); _sum = vfmaq_laneq_f32(_sum, _r65, _k46474849, 1); _sum = vfmaq_laneq_f32(_sum, _r66, _k46474849, 2); vst1q_f32(outptr, _sum); r0 += 8; r1 += 8; r2 += 8; r3 += 8; r4 += 8; r5 += 8; r6 += 8; outptr += 4; } #else if (nn > 0) { asm volatile( "0: \n" "pld [%1, #256] \n" "vld1.f32 {d26-d27}, [%1] \n"// _sum "veor q14, q14 \n"// _sum2 = 0; "veor q15, q15 \n"// _sum3 = 0; "pld [%9, #256] \n" "vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k0123 k4567 "add %9, #28 \n" "pld [%2, #512] \n" "vld2.f32 {d0-d3}, [%2]! \n"// q0 = 0 2 4 6 q1 = 1 3 5 7 "vmla.f32 q13, q0, d8[0] \n" "vmla.f32 q14, q1, d8[1] \n" "vld2.f32 {d4-d7}, [%2] \n"// q2 = 8 10 12 14 q3 = 9 11 13 15 "vext.32 q8, q0, q2, #1 \n"// q8 = 2 4 6 8 "vext.32 q9, q1, q3, #1 \n"// q9 = 3 5 7 9 "vmla.f32 q15, q8, d9[0] \n" "vmla.f32 q13, q9, d9[1] \n" "vext.32 q10, q0, q2, #2 \n"// q10= 4 6 8 10 "vext.32 q11, q1, q3, #2 \n"// q11= 5 7 9 11 "vmla.f32 q14, q10, d10[0] \n" "vmla.f32 q15, q11, d10[1] \n" "vext.32 q12, q0, q2, #3 \n"// q12= 6 8 10 12 "vmla.f32 q13, q12, d11[0] \n" "pld [%9, #256] \n" "vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k78910 k11121314 "add %9, #28 \n" "pld [%3, #512] \n" "vld2.f32 {d0-d3}, [%3]! \n" "vmla.f32 q14, q0, d12[0] \n" "vmla.f32 q15, q1, d12[1] \n" "vld2.f32 {d4-d7}, [%3] \n" "vext.32 q8, q0, q2, #1 \n" "vext.32 q9, q1, q3, #1 \n" "vmla.f32 q13, q8, d13[0] \n" "vmla.f32 q14, q9, d13[1] \n" "vext.32 q10, q0, q2, #2 \n" "vext.32 q11, q1, q3, #2 \n" "vmla.f32 q15, q10, d14[0] \n" "vmla.f32 q13, q11, d14[1] \n" "vext.32 q12, q0, q2, #3 \n" "vmla.f32 q14, q12, d15[0] \n" "pld [%9, #256] \n" "vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k14151617 k18192021 "add %9, #28 \n" "pld [%4, #512] \n" "vld2.f32 {d0-d3}, [%4]! \n" "vmla.f32 q15, q0, d8[0] \n" "vmla.f32 q13, q1, d8[1] \n" "vld2.f32 {d4-d7}, [%4] \n" "vext.32 q8, q0, q2, #1 \n" "vext.32 q9, q1, q3, #1 \n" "vmla.f32 q14, q8, d9[0] \n" "vmla.f32 q15, q9, d9[1] \n" "vext.32 q10, q0, q2, #2 \n" "vext.32 q11, q1, q3, #2 \n" "vmla.f32 q13, q10, d10[0] \n" "vmla.f32 q14, q11, d10[1] \n" "vext.32 q12, q0, q2, #3 \n" "vmla.f32 q15, q12, d11[0] \n" "pld [%9, #256] \n" "vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k21222324 k25262728 "add %9, #28 \n" "pld [%5, #512] \n" "vld2.f32 {d0-d3}, [%5]! \n" "vmla.f32 q13, q0, d12[0] \n" "vmla.f32 q14, q1, d12[1] \n" "vld2.f32 {d4-d7}, [%5] \n" "vext.32 q8, q0, q2, #1 \n" "vext.32 q9, q1, q3, #1 \n" "vmla.f32 q15, q8, d13[0] \n" "vmla.f32 q13, q9, d13[1] \n" "vext.32 q10, q0, q2, #2 \n" "vext.32 q11, q1, q3, #2 \n" "vmla.f32 q14, q10, d14[0] \n" "vmla.f32 q15, q11, d14[1] \n" "vext.32 q12, q0, q2, #3 \n" "vmla.f32 q13, q12, d15[0] \n" "pld [%9, #256] \n" "vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k28293031 k32333435 "add %9, #28 \n" "pld [%6, #512] \n" "vld2.f32 {d0-d3}, [%6]! 
\n" "vmla.f32 q14, q0, d8[0] \n" "vmla.f32 q15, q1, d8[1] \n" "vld2.f32 {d4-d7}, [%6] \n" "vext.32 q8, q0, q2, #1 \n" "vext.32 q9, q1, q3, #1 \n" "vmla.f32 q13, q8, d9[0] \n" "vmla.f32 q14, q9, d9[1] \n" "vext.32 q10, q0, q2, #2 \n" "vext.32 q11, q1, q3, #2 \n" "vmla.f32 q15, q10, d10[0] \n" "vmla.f32 q13, q11, d10[1] \n" "vext.32 q12, q0, q2, #3 \n" "vmla.f32 q14, q12, d11[0] \n" "pld [%9, #256] \n" "vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k35363738 k39404142 "add %9, #28 \n" "pld [%7, #512] \n" "vld2.f32 {d0-d3}, [%7]! \n" "vmla.f32 q15, q0, d12[0] \n" "vmla.f32 q13, q1, d12[1] \n" "vld2.f32 {d4-d7}, [%7] \n" "vext.32 q8, q0, q2, #1 \n" "vext.32 q9, q1, q3, #1 \n" "vmla.f32 q14, q8, d13[0] \n" "vmla.f32 q15, q9, d13[1] \n" "vext.32 q10, q0, q2, #2 \n" "vext.32 q11, q1, q3, #2 \n" "vmla.f32 q13, q10, d14[0] \n" "vmla.f32 q14, q11, d14[1] \n" "vext.32 q12, q0, q2, #3 \n" "vmla.f32 q15, q12, d15[0] \n" "pld [%9, #256] \n" "vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k42434445 k46474849 "sub %9, #168 \n"// restore k0 "pld [%8, #512] \n" "vld2.f32 {d0-d3}, [%8]! \n" "vmla.f32 q13, q0, d8[0] \n" "vmla.f32 q14, q1, d8[1] \n" "vld2.f32 {d4-d7}, [%8] \n" "vext.32 q8, q0, q2, #1 \n" "vext.32 q9, q1, q3, #1 \n" "vmla.f32 q15, q8, d9[0] \n" "vmla.f32 q13, q9, d9[1] \n" "vext.32 q10, q0, q2, #2 \n" "vext.32 q11, q1, q3, #2 \n" "vmla.f32 q14, q10, d10[0] \n" "vmla.f32 q15, q11, d10[1] \n" "vext.32 q12, q0, q2, #3 \n" "vmla.f32 q13, q12, d11[0] \n" "vadd.f32 q14, q14, q15 \n" "vadd.f32 q13, q13, q14 \n" "vst1.f32 {d26-d27}, [%1]! \n" "subs %0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(r5), // %7 "=r"(r6), // %8 "=r"(k0) // %9 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(r5), "8"(r6), "9"(k0) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { float sum = 0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r0[3] * k0[3]; sum += r0[4] * k0[4]; sum += r0[5] * k0[5]; sum += r0[6] * k0[6]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r1[3] * k1[3]; sum += r1[4] * k1[4]; sum += r1[5] * k1[5]; sum += r1[6] * k1[6]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum += r2[3] * k2[3]; sum += r2[4] * k2[4]; sum += r2[5] * k2[5]; sum += r2[6] * k2[6]; sum += r3[0] * k3[0]; sum += r3[1] * k3[1]; sum += r3[2] * k3[2]; sum += r3[3] * k3[3]; sum += r3[4] * k3[4]; sum += r3[5] * k3[5]; sum += r3[6] * k3[6]; sum += r4[0] * k4[0]; sum += r4[1] * k4[1]; sum += r4[2] * k4[2]; sum += r4[3] * k4[3]; sum += r4[4] * k4[4]; sum += r4[5] * k4[5]; sum += r4[6] * k4[6]; sum += r5[0] * k5[0]; sum += r5[1] * k5[1]; sum += r5[2] * k5[2]; sum += r5[3] * k5[3]; sum += r5[4] * k5[4]; sum += r5[5] * k5[5]; sum += r5[6] * k5[6]; sum += r6[0] * k6[0]; sum += r6[1] * k6[1]; sum += r6[2] * k6[2]; sum += r6[3] * k6[3]; sum += r6[4] * k6[4]; sum += r6[5] * k6[5]; sum += r6[6] * k6[6]; *outptr += sum; r0 += 2; r1 += 2; r2 += 2; r3 += 2; r4 += 2; r5 += 2; r6 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; r3 += tailstep; r4 += tailstep; r5 += tailstep; r6 += tailstep; } } } } #if NCNN_CNNCACHE static void conv7x7s1_neon_cached( const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, bool* cached_map) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int 
outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* kernel = _kernel; // const float* bias = _bias; #pragma omp parallel for for (int p=0; p<outch; p++) { Mat out = top_blob.channel(p); // const float bias0 = bias ? bias[p] : 0.f; // out.fill(bias0); for (int q=0; q<inch; q++) { float* outptr = out; float* outptr2 = out + outw; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch*49 + q*49; const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w*2; const float* r3 = img0 + w*3; const float* r4 = img0 + w*4; const float* r5 = img0 + w*5; const float* r6 = img0 + w*6; const float* r7 = img0 + w*7; const float* k0 = kernel0; const float* k1 = kernel0 + 7; const float* k2 = kernel0 + 14; const float* k3 = kernel0 + 21; const float* k4 = kernel0 + 28; const float* k5 = kernel0 + 35; const float* k6 = kernel0 + 42; int i = 0; for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 2; int remain = outw - (nn << 2); #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _sum = vld1q_f32(outptr); float32x4_t _k0123 = vld1q_f32(k0); float32x4_t _k4567 = vld1q_f32(k0 + 4); float32x4_t _r00 = vld1q_f32(r0);// 0 1 2 3 float32x4_t _r04 = vld1q_f32(r0 + 4);// 4 5 6 7 float32x4_t _r00n = vld1q_f32(r0 + 8);// 8 9 10 11 float32x4_t _r01 = vextq_f32(_r00, _r04, 1);// 1 2 3 4 float32x4_t _r02 = vextq_f32(_r00, _r04, 2);// 2 3 4 5 float32x4_t _r03 = vextq_f32(_r00, _r04, 3);// 3 4 5 6 float32x4_t _r05 = vextq_f32(_r04, _r00n, 1);// 5 6 7 8 float32x4_t _r06 = vextq_f32(_r04, _r00n, 2);// 6 7 8 9 _sum = vfmaq_laneq_f32(_sum, _r00, _k0123, 0); _sum = vfmaq_laneq_f32(_sum, _r01, _k0123, 1); _sum = vfmaq_laneq_f32(_sum, _r02, _k0123, 2); _sum = vfmaq_laneq_f32(_sum, _r03, _k0123, 3); _sum = vfmaq_laneq_f32(_sum, _r04, _k4567, 0); _sum = vfmaq_laneq_f32(_sum, _r05, _k4567, 1); _sum = vfmaq_laneq_f32(_sum, _r06, _k4567, 2); float32x4_t _k78910 = vld1q_f32(k1); float32x4_t _k11121314 = vld1q_f32(k1 + 4); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r14 = vld1q_f32(r1 + 4); float32x4_t _r10n = vld1q_f32(r1 + 8); float32x4_t _r11 = vextq_f32(_r10, _r14, 1); float32x4_t _r12 = vextq_f32(_r10, _r14, 2); float32x4_t _r13 = vextq_f32(_r10, _r14, 3); float32x4_t _r15 = vextq_f32(_r14, _r10n, 1); float32x4_t _r16 = vextq_f32(_r14, _r10n, 2); _sum = vfmaq_laneq_f32(_sum, _r10, _k78910, 0); _sum = vfmaq_laneq_f32(_sum, _r11, _k78910, 1); _sum = vfmaq_laneq_f32(_sum, _r12, _k78910, 2); _sum = vfmaq_laneq_f32(_sum, _r13, _k78910, 3); _sum = vfmaq_laneq_f32(_sum, _r14, _k11121314, 0); _sum = vfmaq_laneq_f32(_sum, _r15, _k11121314, 1); _sum = vfmaq_laneq_f32(_sum, _r16, _k11121314, 2); float32x4_t _k14151617 = vld1q_f32(k2); float32x4_t _k18192021 = vld1q_f32(k2 + 4); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r24 = vld1q_f32(r2 + 4); float32x4_t _r20n = vld1q_f32(r2 + 8); float32x4_t _r21 = vextq_f32(_r20, _r24, 1); float32x4_t _r22 = vextq_f32(_r20, _r24, 2); float32x4_t _r23 = vextq_f32(_r20, _r24, 3); float32x4_t _r25 = vextq_f32(_r24, _r20n, 1); float32x4_t _r26 = vextq_f32(_r24, _r20n, 2); _sum = vfmaq_laneq_f32(_sum, _r20, _k14151617, 0); _sum = vfmaq_laneq_f32(_sum, _r21, _k14151617, 1); _sum = vfmaq_laneq_f32(_sum, _r22, _k14151617, 2); _sum = vfmaq_laneq_f32(_sum, _r23, _k14151617, 3); _sum = vfmaq_laneq_f32(_sum, _r24, _k18192021, 0); _sum = vfmaq_laneq_f32(_sum, _r25, _k18192021, 1); _sum = vfmaq_laneq_f32(_sum, _r26, _k18192021, 2); float32x4_t _k21222324 = vld1q_f32(k3); 
float32x4_t _k25262728 = vld1q_f32(k3 + 4); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _r34 = vld1q_f32(r3 + 4); float32x4_t _r30n = vld1q_f32(r3 + 8); float32x4_t _r31 = vextq_f32(_r30, _r34, 1); float32x4_t _r32 = vextq_f32(_r30, _r34, 2); float32x4_t _r33 = vextq_f32(_r30, _r34, 3); float32x4_t _r35 = vextq_f32(_r34, _r30n, 1); float32x4_t _r36 = vextq_f32(_r34, _r30n, 2); _sum = vfmaq_laneq_f32(_sum, _r30, _k21222324, 0); _sum = vfmaq_laneq_f32(_sum, _r31, _k21222324, 1); _sum = vfmaq_laneq_f32(_sum, _r32, _k21222324, 2); _sum = vfmaq_laneq_f32(_sum, _r33, _k21222324, 3); _sum = vfmaq_laneq_f32(_sum, _r34, _k25262728, 0); _sum = vfmaq_laneq_f32(_sum, _r35, _k25262728, 1); _sum = vfmaq_laneq_f32(_sum, _r36, _k25262728, 2); float32x4_t _k28293031 = vld1q_f32(k4); float32x4_t _k32333435 = vld1q_f32(k4 + 4); float32x4_t _r40 = vld1q_f32(r4); float32x4_t _r44 = vld1q_f32(r4 + 4); float32x4_t _r40n = vld1q_f32(r4 + 8); float32x4_t _r41 = vextq_f32(_r40, _r44, 1); float32x4_t _r42 = vextq_f32(_r40, _r44, 2); float32x4_t _r43 = vextq_f32(_r40, _r44, 3); float32x4_t _r45 = vextq_f32(_r44, _r40n, 1); float32x4_t _r46 = vextq_f32(_r44, _r40n, 2); _sum = vfmaq_laneq_f32(_sum, _r40, _k28293031, 0); _sum = vfmaq_laneq_f32(_sum, _r41, _k28293031, 1); _sum = vfmaq_laneq_f32(_sum, _r42, _k28293031, 2); _sum = vfmaq_laneq_f32(_sum, _r43, _k28293031, 3); _sum = vfmaq_laneq_f32(_sum, _r44, _k32333435, 0); _sum = vfmaq_laneq_f32(_sum, _r45, _k32333435, 1); _sum = vfmaq_laneq_f32(_sum, _r46, _k32333435, 2); float32x4_t _k35363738 = vld1q_f32(k5); float32x4_t _k39404142 = vld1q_f32(k5 + 4); float32x4_t _r50 = vld1q_f32(r5); float32x4_t _r54 = vld1q_f32(r5 + 4); float32x4_t _r50n = vld1q_f32(r5 + 8); float32x4_t _r51 = vextq_f32(_r50, _r54, 1); float32x4_t _r52 = vextq_f32(_r50, _r54, 2); float32x4_t _r53 = vextq_f32(_r50, _r54, 3); float32x4_t _r55 = vextq_f32(_r54, _r50n, 1); float32x4_t _r56 = vextq_f32(_r54, _r50n, 2); _sum = vfmaq_laneq_f32(_sum, _r50, _k35363738, 0); _sum = vfmaq_laneq_f32(_sum, _r51, _k35363738, 1); _sum = vfmaq_laneq_f32(_sum, _r52, _k35363738, 2); _sum = vfmaq_laneq_f32(_sum, _r53, _k35363738, 3); _sum = vfmaq_laneq_f32(_sum, _r54, _k39404142, 0); _sum = vfmaq_laneq_f32(_sum, _r55, _k39404142, 1); _sum = vfmaq_laneq_f32(_sum, _r56, _k39404142, 2); float32x4_t _k42434445 = vld1q_f32(k6); float32x4_t _k46474849 = vld1q_f32(k6 + 4); float32x4_t _r60 = vld1q_f32(r6); float32x4_t _r64 = vld1q_f32(r6 + 4); float32x4_t _r60n = vld1q_f32(r6 + 8); float32x4_t _r61 = vextq_f32(_r60, _r64, 1); float32x4_t _r62 = vextq_f32(_r60, _r64, 2); float32x4_t _r63 = vextq_f32(_r60, _r64, 3); float32x4_t _r65 = vextq_f32(_r64, _r60n, 1); float32x4_t _r66 = vextq_f32(_r64, _r60n, 2); _sum = vfmaq_laneq_f32(_sum, _r60, _k42434445, 0); _sum = vfmaq_laneq_f32(_sum, _r61, _k42434445, 1); _sum = vfmaq_laneq_f32(_sum, _r62, _k42434445, 2); _sum = vfmaq_laneq_f32(_sum, _r63, _k42434445, 3); _sum = vfmaq_laneq_f32(_sum, _r64, _k46474849, 0); _sum = vfmaq_laneq_f32(_sum, _r65, _k46474849, 1); _sum = vfmaq_laneq_f32(_sum, _r66, _k46474849, 2); vst1q_f32(outptr, _sum); r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; r5 += 4; r6 += 4; outptr += 4; } #else if (nn > 0) { asm volatile( "0: \n" "pld [%1, #256] \n" "vld1.f32 {d24-d25}, [%1] \n"// _sum "veor q13, q13 \n"// _sum2 = 0; "veor q14, q14 \n"// _sum3 = 0; "veor q15, q15 \n"// _sum4 = 0; "pld [%9, #256] \n" "vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k0123 k4567 "add %9, #28 \n" "pld [%2, #128] \n" "vld1.f32 {d0-d1}, [%2]! 
\n"// q0 = 0 1 2 3 "vmla.f32 q12, q0, d8[0] \n" "pld [%2, #256] \n" "vld1.f32 {d4-d7}, [%2] \n"// q2 = 4 5 6 7 q3 = 8 9 10 11 "vmla.f32 q13, q2, d10[0] \n" "vext.32 q1, q0, q2, #1 \n"// q1 = 1 2 3 4 "vext.32 q10, q2, q3, #1 \n"// q10= 5 6 7 8 "vmla.f32 q14, q1, d8[1] \n" "vmla.f32 q15, q10, d10[1] \n" "vext.32 q8, q0, q2, #2 \n"// q8 = 2 3 4 5 "vext.32 q11, q2, q3, #2 \n"// q11= 6 7 8 9 "vmla.f32 q12, q8, d9[0] \n" "vmla.f32 q13, q11, d11[0] \n" "vext.32 q9, q0, q2, #3 \n"// q9 = 3 4 5 6 "vmla.f32 q14, q9, d9[1] \n" "pld [%9, #256] \n" "vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k78910 k11121314 "add %9, #28 \n" "pld [%3, #128] \n" "vld1.f32 {d0-d1}, [%3]! \n" "vmla.f32 q15, q0, d12[0] \n" "pld [%3, #256] \n" "vld1.f32 {d4-d7}, [%3] \n" "vmla.f32 q12, q2, d14[0] \n" "vext.32 q1, q0, q2, #1 \n" "vext.32 q10, q2, q3, #1 \n" "vmla.f32 q13, q1, d12[1] \n" "vmla.f32 q14, q10, d14[1] \n" "vext.32 q8, q0, q2, #2 \n" "vext.32 q11, q2, q3, #2 \n" "vmla.f32 q15, q8, d13[0] \n" "vmla.f32 q12, q11, d15[0] \n" "vext.32 q9, q0, q2, #3 \n" "vmla.f32 q13, q9, d13[1] \n" "pld [%9, #256] \n" "vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k14151617 k18192021 "add %9, #28 \n" "pld [%4, #128] \n" "vld1.f32 {d0-d1}, [%4]! \n" "vmla.f32 q14, q0, d8[0] \n" "pld [%4, #256] \n" "vld1.f32 {d4-d7}, [%4] \n" "vmla.f32 q15, q2, d10[0] \n" "vext.32 q1, q0, q2, #1 \n" "vext.32 q10, q2, q3, #1 \n" "vmla.f32 q12, q1, d8[1] \n" "vmla.f32 q13, q10, d10[1] \n" "vext.32 q8, q0, q2, #2 \n" "vext.32 q11, q2, q3, #2 \n" "vmla.f32 q14, q8, d9[0] \n" "vmla.f32 q15, q11, d11[0] \n" "vext.32 q9, q0, q2, #3 \n" "vmla.f32 q12, q9, d9[1] \n" "pld [%9, #256] \n" "vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k21222324 k25262728 "add %9, #28 \n" "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5]! \n" "vmla.f32 q13, q0, d12[0] \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5] \n" "vmla.f32 q14, q2, d14[0] \n" "vext.32 q1, q0, q2, #1 \n" "vext.32 q10, q2, q3, #1 \n" "vmla.f32 q15, q1, d12[1] \n" "vmla.f32 q12, q10, d14[1] \n" "vext.32 q8, q0, q2, #2 \n" "vext.32 q11, q2, q3, #2 \n" "vmla.f32 q13, q8, d13[0] \n" "vmla.f32 q14, q11, d15[0] \n" "vext.32 q9, q0, q2, #3 \n" "vmla.f32 q15, q9, d13[1] \n" "pld [%9, #256] \n" "vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k28293031 k32333435 "add %9, #28 \n" "pld [%6, #128] \n" "vld1.f32 {d0-d1}, [%6]! \n" "vmla.f32 q12, q0, d8[0] \n" "pld [%6, #256] \n" "vld1.f32 {d4-d7}, [%6] \n" "vmla.f32 q13, q2, d10[0] \n" "vext.32 q1, q0, q2, #1 \n" "vext.32 q10, q2, q3, #1 \n" "vmla.f32 q14, q1, d8[1] \n" "vmla.f32 q15, q10, d10[1] \n" "vext.32 q8, q0, q2, #2 \n" "vext.32 q11, q2, q3, #2 \n" "vmla.f32 q12, q8, d9[0] \n" "vmla.f32 q13, q11, d11[0] \n" "vext.32 q9, q0, q2, #3 \n" "vmla.f32 q14, q9, d9[1] \n" "pld [%9, #256] \n" "vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k35363738 k39404142 "add %9, #28 \n" "pld [%7, #128] \n" "vld1.f32 {d0-d1}, [%7]! \n" "vmla.f32 q15, q0, d12[0] \n" "pld [%7, #256] \n" "vld1.f32 {d4-d7}, [%7] \n" "vmla.f32 q12, q2, d14[0] \n" "vext.32 q1, q0, q2, #1 \n" "vext.32 q10, q2, q3, #1 \n" "vmla.f32 q13, q1, d12[1] \n" "vmla.f32 q14, q10, d14[1] \n" "vext.32 q8, q0, q2, #2 \n" "vext.32 q11, q2, q3, #2 \n" "vmla.f32 q15, q8, d13[0] \n" "vmla.f32 q12, q11, d15[0] \n" "vext.32 q9, q0, q2, #3 \n" "vmla.f32 q13, q9, d13[1] \n" "pld [%9, #256] \n" "vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k42434445 k46474849 "sub %9, #168 \n"// restore k0 "pld [%8, #128] \n" "vld1.f32 {d0-d1}, [%8]! 
\n" "vmla.f32 q14, q0, d8[0] \n" "pld [%8, #256] \n" "vld1.f32 {d4-d7}, [%8] \n" "vmla.f32 q15, q2, d10[0] \n" "vext.32 q1, q0, q2, #1 \n" "vext.32 q10, q2, q3, #1 \n" "vmla.f32 q12, q1, d8[1] \n" "vmla.f32 q13, q10, d10[1] \n" "vext.32 q8, q0, q2, #2 \n" "vext.32 q11, q2, q3, #2 \n" "vmla.f32 q14, q8, d9[0] \n" "vmla.f32 q15, q11, d11[0] \n" "vext.32 q9, q0, q2, #3 \n" "vmla.f32 q12, q9, d9[1] \n" "vadd.f32 q13, q13, q14 \n" "vadd.f32 q13, q13, q15 \n" "vadd.f32 q12, q12, q13 \n" "vst1.f32 {d24-d25}, [%1]! \n" "subs %0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(r5), // %7 "=r"(r6), // %8 "=r"(k0) // %9 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(r5), "8"(r6), "9"(k0) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { if (cached_map[outptr - out]) { float sum = 0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r0[3] * k0[3]; sum += r0[4] * k0[4]; sum += r0[5] * k0[5]; sum += r0[6] * k0[6]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r1[3] * k1[3]; sum += r1[4] * k1[4]; sum += r1[5] * k1[5]; sum += r1[6] * k1[6]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum += r2[3] * k2[3]; sum += r2[4] * k2[4]; sum += r2[5] * k2[5]; sum += r2[6] * k2[6]; sum += r3[0] * k3[0]; sum += r3[1] * k3[1]; sum += r3[2] * k3[2]; sum += r3[3] * k3[3]; sum += r3[4] * k3[4]; sum += r3[5] * k3[5]; sum += r3[6] * k3[6]; sum += r4[0] * k4[0]; sum += r4[1] * k4[1]; sum += r4[2] * k4[2]; sum += r4[3] * k4[3]; sum += r4[4] * k4[4]; sum += r4[5] * k4[5]; sum += r4[6] * k4[6]; sum += r5[0] * k5[0]; sum += r5[1] * k5[1]; sum += r5[2] * k5[2]; sum += r5[3] * k5[3]; sum += r5[4] * k5[4]; sum += r5[5] * k5[5]; sum += r5[6] * k5[6]; sum += r6[0] * k6[0]; sum += r6[1] * k6[1]; sum += r6[2] * k6[2]; sum += r6[3] * k6[3]; sum += r6[4] * k6[4]; sum += r6[5] * k6[5]; sum += r6[6] * k6[6]; *outptr += sum; } r0++; r1++; r2++; r3++; r4++; r5++; r6++; outptr++; } r0 += 6; r1 += 6; r2 += 6; r3 += 6; r4 += 6; r5 += 6; r6 += 6; } } } } static void conv7x7s2_neon_cached( const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, bool* cached_map) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2*outw + w; const float* kernel = _kernel; // const float* bias = _bias; #pragma omp parallel for for (int p=0; p<outch; p++) { Mat out = top_blob.channel(p); // const float bias0 = bias ? 
bias[p] : 0.f; // out.fill(bias0); for (int q=0; q<inch; q++) { float* outptr = out; float* outptr2 = out + outw; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch*49 + q*49; const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w*2; const float* r3 = img0 + w*3; const float* r4 = img0 + w*4; const float* r5 = img0 + w*5; const float* r6 = img0 + w*6; const float* k0 = kernel0; const float* k1 = kernel0 + 7; const float* k2 = kernel0 + 14; const float* k3 = kernel0 + 21; const float* k4 = kernel0 + 28; const float* k5 = kernel0 + 35; const float* k6 = kernel0 + 42; int i = 0; for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 2; int remain = outw - (nn << 2); #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _sum = vld1q_f32(outptr); float32x4_t _k0123 = vld1q_f32(k0); float32x4_t _k4567 = vld1q_f32(k0 + 4); float32x4x2_t _r00_02461357 = vld2q_f32(r0); float32x4x2_t _r00nx2 = vld2q_f32(r0 + 8); float32x4_t _r0_8101214 = _r00nx2.val[0];// 8 10 12 14 float32x4_t _r0_9111315 = _r00nx2.val[1];// 9 11 13 15 float32x4_t _r00 = _r00_02461357.val[0];// 0 2 4 6 float32x4_t _r01 = _r00_02461357.val[1];// 1 3 5 7 float32x4_t _r02 = vextq_f32(_r00, _r0_8101214, 1);// 2 4 6 8 float32x4_t _r03 = vextq_f32(_r01, _r0_9111315, 1);// 3 5 7 9 float32x4_t _r04 = vextq_f32(_r00, _r0_8101214, 2);// 4 6 8 10 float32x4_t _r05 = vextq_f32(_r01, _r0_9111315, 2);// 5 7 9 11 float32x4_t _r06 = vextq_f32(_r00, _r0_8101214, 3);// 6 8 10 12 _sum = vfmaq_laneq_f32(_sum, _r00, _k0123, 0); _sum = vfmaq_laneq_f32(_sum, _r01, _k0123, 1); _sum = vfmaq_laneq_f32(_sum, _r02, _k0123, 2); _sum = vfmaq_laneq_f32(_sum, _r03, _k0123, 3); _sum = vfmaq_laneq_f32(_sum, _r04, _k4567, 0); _sum = vfmaq_laneq_f32(_sum, _r05, _k4567, 1); _sum = vfmaq_laneq_f32(_sum, _r06, _k4567, 2); float32x4_t _k78910 = vld1q_f32(k1); float32x4_t _k11121314 = vld1q_f32(k1 + 4); float32x4x2_t _r10_02461357 = vld2q_f32(r1); float32x4x2_t _r10nx2 = vld2q_f32(r1 + 8); float32x4_t _r1_8101214 = _r10nx2.val[0]; float32x4_t _r1_9111315 = _r10nx2.val[1]; float32x4_t _r10 = _r10_02461357.val[0]; float32x4_t _r11 = _r10_02461357.val[1]; float32x4_t _r12 = vextq_f32(_r10, _r1_8101214, 1); float32x4_t _r13 = vextq_f32(_r11, _r1_9111315, 1); float32x4_t _r14 = vextq_f32(_r10, _r1_8101214, 2); float32x4_t _r15 = vextq_f32(_r11, _r1_9111315, 2); float32x4_t _r16 = vextq_f32(_r10, _r1_8101214, 3); _sum = vfmaq_laneq_f32(_sum, _r10, _k78910, 0); _sum = vfmaq_laneq_f32(_sum, _r11, _k78910, 1); _sum = vfmaq_laneq_f32(_sum, _r12, _k78910, 2); _sum = vfmaq_laneq_f32(_sum, _r13, _k78910, 3); _sum = vfmaq_laneq_f32(_sum, _r14, _k11121314, 0); _sum = vfmaq_laneq_f32(_sum, _r15, _k11121314, 1); _sum = vfmaq_laneq_f32(_sum, _r16, _k11121314, 2); float32x4_t _k14151617 = vld1q_f32(k2); float32x4_t _k18192021 = vld1q_f32(k2 + 4); float32x4x2_t _r20_02461357 = vld2q_f32(r2); float32x4x2_t _r20nx2 = vld2q_f32(r2 + 8); float32x4_t _r2_8101214 = _r20nx2.val[0]; float32x4_t _r2_9111315 = _r20nx2.val[1]; float32x4_t _r20 = _r20_02461357.val[0]; float32x4_t _r21 = _r20_02461357.val[1]; float32x4_t _r22 = vextq_f32(_r20, _r2_8101214, 1); float32x4_t _r23 = vextq_f32(_r21, _r2_9111315, 1); float32x4_t _r24 = vextq_f32(_r20, _r2_8101214, 2); float32x4_t _r25 = vextq_f32(_r21, _r2_9111315, 2); float32x4_t _r26 = vextq_f32(_r20, _r2_8101214, 3); _sum = vfmaq_laneq_f32(_sum, _r20, _k14151617, 0); _sum = vfmaq_laneq_f32(_sum, _r21, _k14151617, 1); _sum = vfmaq_laneq_f32(_sum, 
_r22, _k14151617, 2); _sum = vfmaq_laneq_f32(_sum, _r23, _k14151617, 3); _sum = vfmaq_laneq_f32(_sum, _r24, _k18192021, 0); _sum = vfmaq_laneq_f32(_sum, _r25, _k18192021, 1); _sum = vfmaq_laneq_f32(_sum, _r26, _k18192021, 2); float32x4_t _k21222324 = vld1q_f32(k3); float32x4_t _k25262728 = vld1q_f32(k3 + 4); float32x4x2_t _r30_02461357 = vld2q_f32(r3); float32x4x2_t _r30nx2 = vld2q_f32(r3 + 8); float32x4_t _r3_8101214 = _r30nx2.val[0]; float32x4_t _r3_9111315 = _r30nx2.val[1]; float32x4_t _r30 = _r30_02461357.val[0]; float32x4_t _r31 = _r30_02461357.val[1]; float32x4_t _r32 = vextq_f32(_r30, _r3_8101214, 1); float32x4_t _r33 = vextq_f32(_r31, _r3_9111315, 1); float32x4_t _r34 = vextq_f32(_r30, _r3_8101214, 2); float32x4_t _r35 = vextq_f32(_r31, _r3_9111315, 2); float32x4_t _r36 = vextq_f32(_r30, _r3_8101214, 3); _sum = vfmaq_laneq_f32(_sum, _r30, _k21222324, 0); _sum = vfmaq_laneq_f32(_sum, _r31, _k21222324, 1); _sum = vfmaq_laneq_f32(_sum, _r32, _k21222324, 2); _sum = vfmaq_laneq_f32(_sum, _r33, _k21222324, 3); _sum = vfmaq_laneq_f32(_sum, _r34, _k25262728, 0); _sum = vfmaq_laneq_f32(_sum, _r35, _k25262728, 1); _sum = vfmaq_laneq_f32(_sum, _r36, _k25262728, 2); float32x4_t _k28293031 = vld1q_f32(k4); float32x4_t _k32333435 = vld1q_f32(k4 + 4); float32x4x2_t _r40_02461357 = vld2q_f32(r4); float32x4x2_t _r40nx2 = vld2q_f32(r4 + 8); float32x4_t _r4_8101214 = _r40nx2.val[0]; float32x4_t _r4_9111315 = _r40nx2.val[1]; float32x4_t _r40 = _r40_02461357.val[0]; float32x4_t _r41 = _r40_02461357.val[1]; float32x4_t _r42 = vextq_f32(_r40, _r4_8101214, 1); float32x4_t _r43 = vextq_f32(_r41, _r4_9111315, 1); float32x4_t _r44 = vextq_f32(_r40, _r4_8101214, 2); float32x4_t _r45 = vextq_f32(_r41, _r4_9111315, 2); float32x4_t _r46 = vextq_f32(_r40, _r4_8101214, 3); _sum = vfmaq_laneq_f32(_sum, _r40, _k28293031, 0); _sum = vfmaq_laneq_f32(_sum, _r41, _k28293031, 1); _sum = vfmaq_laneq_f32(_sum, _r42, _k28293031, 2); _sum = vfmaq_laneq_f32(_sum, _r43, _k28293031, 3); _sum = vfmaq_laneq_f32(_sum, _r44, _k32333435, 0); _sum = vfmaq_laneq_f32(_sum, _r45, _k32333435, 1); _sum = vfmaq_laneq_f32(_sum, _r46, _k32333435, 2); float32x4_t _k35363738 = vld1q_f32(k5); float32x4_t _k39404142 = vld1q_f32(k5 + 4); float32x4x2_t _r50_02461357 = vld2q_f32(r5); float32x4x2_t _r50nx2 = vld2q_f32(r5 + 8); float32x4_t _r5_8101214 = _r50nx2.val[0]; float32x4_t _r5_9111315 = _r50nx2.val[1]; float32x4_t _r50 = _r50_02461357.val[0]; float32x4_t _r51 = _r50_02461357.val[1]; float32x4_t _r52 = vextq_f32(_r50, _r5_8101214, 1); float32x4_t _r53 = vextq_f32(_r51, _r5_9111315, 1); float32x4_t _r54 = vextq_f32(_r50, _r5_8101214, 2); float32x4_t _r55 = vextq_f32(_r51, _r5_9111315, 2); float32x4_t _r56 = vextq_f32(_r50, _r5_8101214, 3); _sum = vfmaq_laneq_f32(_sum, _r50, _k35363738, 0); _sum = vfmaq_laneq_f32(_sum, _r51, _k35363738, 1); _sum = vfmaq_laneq_f32(_sum, _r52, _k35363738, 2); _sum = vfmaq_laneq_f32(_sum, _r53, _k35363738, 3); _sum = vfmaq_laneq_f32(_sum, _r54, _k39404142, 0); _sum = vfmaq_laneq_f32(_sum, _r55, _k39404142, 1); _sum = vfmaq_laneq_f32(_sum, _r56, _k39404142, 2); float32x4_t _k42434445 = vld1q_f32(k6); float32x4_t _k46474849 = vld1q_f32(k6 + 4); float32x4x2_t _r60_02461357 = vld2q_f32(r6); float32x4x2_t _r60nx2 = vld2q_f32(r6 + 8); float32x4_t _r6_8101214 = _r60nx2.val[0]; float32x4_t _r6_9111315 = _r60nx2.val[1]; float32x4_t _r60 = _r60_02461357.val[0]; float32x4_t _r61 = _r60_02461357.val[1]; float32x4_t _r62 = vextq_f32(_r60, _r6_8101214, 1); float32x4_t _r63 = vextq_f32(_r61, _r6_9111315, 1); float32x4_t _r64 = 
vextq_f32(_r60, _r6_8101214, 2); float32x4_t _r65 = vextq_f32(_r61, _r6_9111315, 2); float32x4_t _r66 = vextq_f32(_r60, _r6_8101214, 3); _sum = vfmaq_laneq_f32(_sum, _r60, _k42434445, 0); _sum = vfmaq_laneq_f32(_sum, _r61, _k42434445, 1); _sum = vfmaq_laneq_f32(_sum, _r62, _k42434445, 2); _sum = vfmaq_laneq_f32(_sum, _r63, _k42434445, 3); _sum = vfmaq_laneq_f32(_sum, _r64, _k46474849, 0); _sum = vfmaq_laneq_f32(_sum, _r65, _k46474849, 1); _sum = vfmaq_laneq_f32(_sum, _r66, _k46474849, 2); vst1q_f32(outptr, _sum); r0 += 8; r1 += 8; r2 += 8; r3 += 8; r4 += 8; r5 += 8; r6 += 8; outptr += 4; } #else if (nn > 0) { asm volatile( "0: \n" "pld [%1, #256] \n" "vld1.f32 {d26-d27}, [%1] \n"// _sum "veor q14, q14 \n"// _sum2 = 0; "veor q15, q15 \n"// _sum3 = 0; "pld [%9, #256] \n" "vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k0123 k4567 "add %9, #28 \n" "pld [%2, #512] \n" "vld2.f32 {d0-d3}, [%2]! \n"// q0 = 0 2 4 6 q1 = 1 3 5 7 "vmla.f32 q13, q0, d8[0] \n" "vmla.f32 q14, q1, d8[1] \n" "vld2.f32 {d4-d7}, [%2] \n"// q2 = 8 10 12 14 q3 = 9 11 13 15 "vext.32 q8, q0, q2, #1 \n"// q8 = 2 4 6 8 "vext.32 q9, q1, q3, #1 \n"// q9 = 3 5 7 9 "vmla.f32 q15, q8, d9[0] \n" "vmla.f32 q13, q9, d9[1] \n" "vext.32 q10, q0, q2, #2 \n"// q10= 4 6 8 10 "vext.32 q11, q1, q3, #2 \n"// q11= 5 7 9 11 "vmla.f32 q14, q10, d10[0] \n" "vmla.f32 q15, q11, d10[1] \n" "vext.32 q12, q0, q2, #3 \n"// q12= 6 8 10 12 "vmla.f32 q13, q12, d11[0] \n" "pld [%9, #256] \n" "vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k78910 k11121314 "add %9, #28 \n" "pld [%3, #512] \n" "vld2.f32 {d0-d3}, [%3]! \n" "vmla.f32 q14, q0, d12[0] \n" "vmla.f32 q15, q1, d12[1] \n" "vld2.f32 {d4-d7}, [%3] \n" "vext.32 q8, q0, q2, #1 \n" "vext.32 q9, q1, q3, #1 \n" "vmla.f32 q13, q8, d13[0] \n" "vmla.f32 q14, q9, d13[1] \n" "vext.32 q10, q0, q2, #2 \n" "vext.32 q11, q1, q3, #2 \n" "vmla.f32 q15, q10, d14[0] \n" "vmla.f32 q13, q11, d14[1] \n" "vext.32 q12, q0, q2, #3 \n" "vmla.f32 q14, q12, d15[0] \n" "pld [%9, #256] \n" "vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k14151617 k18192021 "add %9, #28 \n" "pld [%4, #512] \n" "vld2.f32 {d0-d3}, [%4]! \n" "vmla.f32 q15, q0, d8[0] \n" "vmla.f32 q13, q1, d8[1] \n" "vld2.f32 {d4-d7}, [%4] \n" "vext.32 q8, q0, q2, #1 \n" "vext.32 q9, q1, q3, #1 \n" "vmla.f32 q14, q8, d9[0] \n" "vmla.f32 q15, q9, d9[1] \n" "vext.32 q10, q0, q2, #2 \n" "vext.32 q11, q1, q3, #2 \n" "vmla.f32 q13, q10, d10[0] \n" "vmla.f32 q14, q11, d10[1] \n" "vext.32 q12, q0, q2, #3 \n" "vmla.f32 q15, q12, d11[0] \n" "pld [%9, #256] \n" "vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k21222324 k25262728 "add %9, #28 \n" "pld [%5, #512] \n" "vld2.f32 {d0-d3}, [%5]! \n" "vmla.f32 q13, q0, d12[0] \n" "vmla.f32 q14, q1, d12[1] \n" "vld2.f32 {d4-d7}, [%5] \n" "vext.32 q8, q0, q2, #1 \n" "vext.32 q9, q1, q3, #1 \n" "vmla.f32 q15, q8, d13[0] \n" "vmla.f32 q13, q9, d13[1] \n" "vext.32 q10, q0, q2, #2 \n" "vext.32 q11, q1, q3, #2 \n" "vmla.f32 q14, q10, d14[0] \n" "vmla.f32 q15, q11, d14[1] \n" "vext.32 q12, q0, q2, #3 \n" "vmla.f32 q13, q12, d15[0] \n" "pld [%9, #256] \n" "vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k28293031 k32333435 "add %9, #28 \n" "pld [%6, #512] \n" "vld2.f32 {d0-d3}, [%6]! 
\n" "vmla.f32 q14, q0, d8[0] \n" "vmla.f32 q15, q1, d8[1] \n" "vld2.f32 {d4-d7}, [%6] \n" "vext.32 q8, q0, q2, #1 \n" "vext.32 q9, q1, q3, #1 \n" "vmla.f32 q13, q8, d9[0] \n" "vmla.f32 q14, q9, d9[1] \n" "vext.32 q10, q0, q2, #2 \n" "vext.32 q11, q1, q3, #2 \n" "vmla.f32 q15, q10, d10[0] \n" "vmla.f32 q13, q11, d10[1] \n" "vext.32 q12, q0, q2, #3 \n" "vmla.f32 q14, q12, d11[0] \n" "pld [%9, #256] \n" "vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k35363738 k39404142 "add %9, #28 \n" "pld [%7, #512] \n" "vld2.f32 {d0-d3}, [%7]! \n" "vmla.f32 q15, q0, d12[0] \n" "vmla.f32 q13, q1, d12[1] \n" "vld2.f32 {d4-d7}, [%7] \n" "vext.32 q8, q0, q2, #1 \n" "vext.32 q9, q1, q3, #1 \n" "vmla.f32 q14, q8, d13[0] \n" "vmla.f32 q15, q9, d13[1] \n" "vext.32 q10, q0, q2, #2 \n" "vext.32 q11, q1, q3, #2 \n" "vmla.f32 q13, q10, d14[0] \n" "vmla.f32 q14, q11, d14[1] \n" "vext.32 q12, q0, q2, #3 \n" "vmla.f32 q15, q12, d15[0] \n" "pld [%9, #256] \n" "vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k42434445 k46474849 "sub %9, #168 \n"// restore k0 "pld [%8, #512] \n" "vld2.f32 {d0-d3}, [%8]! \n" "vmla.f32 q13, q0, d8[0] \n" "vmla.f32 q14, q1, d8[1] \n" "vld2.f32 {d4-d7}, [%8] \n" "vext.32 q8, q0, q2, #1 \n" "vext.32 q9, q1, q3, #1 \n" "vmla.f32 q15, q8, d9[0] \n" "vmla.f32 q13, q9, d9[1] \n" "vext.32 q10, q0, q2, #2 \n" "vext.32 q11, q1, q3, #2 \n" "vmla.f32 q14, q10, d10[0] \n" "vmla.f32 q15, q11, d10[1] \n" "vext.32 q12, q0, q2, #3 \n" "vmla.f32 q13, q12, d11[0] \n" "vadd.f32 q14, q14, q15 \n" "vadd.f32 q13, q13, q14 \n" "vst1.f32 {d26-d27}, [%1]! \n" "subs %0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(r5), // %7 "=r"(r6), // %8 "=r"(k0) // %9 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(r5), "8"(r6), "9"(k0) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { if (cached_map[outptr - out]) { float sum = 0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r0[3] * k0[3]; sum += r0[4] * k0[4]; sum += r0[5] * k0[5]; sum += r0[6] * k0[6]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r1[3] * k1[3]; sum += r1[4] * k1[4]; sum += r1[5] * k1[5]; sum += r1[6] * k1[6]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum += r2[3] * k2[3]; sum += r2[4] * k2[4]; sum += r2[5] * k2[5]; sum += r2[6] * k2[6]; sum += r3[0] * k3[0]; sum += r3[1] * k3[1]; sum += r3[2] * k3[2]; sum += r3[3] * k3[3]; sum += r3[4] * k3[4]; sum += r3[5] * k3[5]; sum += r3[6] * k3[6]; sum += r4[0] * k4[0]; sum += r4[1] * k4[1]; sum += r4[2] * k4[2]; sum += r4[3] * k4[3]; sum += r4[4] * k4[4]; sum += r4[5] * k4[5]; sum += r4[6] * k4[6]; sum += r5[0] * k5[0]; sum += r5[1] * k5[1]; sum += r5[2] * k5[2]; sum += r5[3] * k5[3]; sum += r5[4] * k5[4]; sum += r5[5] * k5[5]; sum += r5[6] * k5[6]; sum += r6[0] * k6[0]; sum += r6[1] * k6[1]; sum += r6[2] * k6[2]; sum += r6[3] * k6[3]; sum += r6[4] * k6[4]; sum += r6[5] * k6[5]; sum += r6[6] * k6[6]; *outptr += sum; } r0 += 2; r1 += 2; r2 += 2; r3 += 2; r4 += 2; r5 += 2; r6 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; r3 += tailstep; r4 += tailstep; r5 += tailstep; r6 += tailstep; } } } } #endif // NCNN_CNNCACHE
atkin.c
#include "atkin/atkin.h" #include "atkin/atkin_q1.h" #include "atkin/atkin_q2.h" #include "atkin/atkin_q3.h" #include <stdlib.h> /* malloc, free */ #include <string.h> /* memset */ #include <math.h> /* ceil, sqrt */ #define CHUNK_WIDTH 22 #include "common/defs.h" #include "common/enum.h" #include "common/presieve.h" #include "common/bitset.h" static inline int atkin_chunk_sieve_psquares(char* chunk, llong lower, llong upper, const char* sieved) { const llong chunk_size = upper - lower; for (llong p = 7; p * p < upper; ++p) { if (sieved[p] == 1) { llong p_sqr = p*p; llong mod = lower % p_sqr; llong i = p_sqr * (mod != 0) - mod; for (; i < chunk_size; i += p_sqr) { bitset_clear(chunk, i); } } } return 0; } static inline int atkin_chunk(char* chunk, llong lower, llong upper, const char* sieved) { memset(chunk, 0, CHUNK_BYTES); atkin_chunk_q1(chunk, lower, upper); atkin_chunk_q2(chunk, lower, upper); atkin_chunk_q3(chunk, lower, upper); atkin_chunk_sieve_psquares(chunk, lower, upper, sieved); return 0; } static inline void atkin_precomp() { atkin_q1_precomp(); atkin_q2_precomp(); atkin_q3_precomp(); } llong atkin(llong lower, llong upper, int print) { llong ret = 0; llong root = sqrt(upper) + 1; char* arr = (char*)malloc(root * sizeof(char)); if (arr == NULL) { return ret; } enumerate_bitset_precomp(); atkin_precomp(); erath_less_than(arr, root); ret = enumerate(arr + lower, arr + root, lower, print); char* chunk = (char*)malloc(CHUNK_BYTES); if (chunk == NULL) { free(arr); return 0; } bitset_enum_func enumerator = enumerate_bitset; if (print != 0) { enumerator = enumerate_bitset_print; } llong chunk_lower = (root < lower) ? lower : root; llong chunk_upper = chunk_lower + CHUNK_SIZE ; for (; chunk_upper < upper; chunk_lower += CHUNK_SIZE, chunk_upper += CHUNK_SIZE) { atkin_chunk(chunk, chunk_lower, chunk_upper, arr); ret += enumerator(chunk, CHUNK_BYTES, chunk_lower); } unsigned last_chunk_bytes = ceil((upper - chunk_lower) / 8.0); atkin_chunk(chunk, chunk_lower, upper, arr); ret += enumerator(chunk, last_chunk_bytes, chunk_lower); free(chunk); free(arr); return ret; } llong atkin_mt(llong lower, llong upper, int print) { llong ret = 0; llong root = sqrt(upper) + 1; char* arr = (char*)malloc(root * sizeof(char)); if (arr == NULL) { return ret; } enumerate_bitset_precomp(); atkin_precomp(); erath_less_than(arr, root); ret = enumerate(arr + lower, arr + root, lower, print); bitset_enum_func enumerator = enumerate_bitset; if (print != 0) { enumerator = enumerate_bitset_print; } llong i, first_chunk_start = (root < lower) ? lower : root; #pragma omp parallel for ordered, schedule(dynamic) for (i = first_chunk_start; i < upper; i += CHUNK_SIZE) { char* chunk = malloc(CHUNK_BYTES); llong chunk_size = CHUNK_SIZE; if ((i + chunk_size) > upper) { chunk_size = upper - i; } atkin_chunk(chunk, i, i + chunk_size, arr); #pragma omp ordered ret += enumerator(chunk, CHUNK_BYTES, i); free(chunk); } free(arr); return ret; }
cFunctions.c
#include <Python.h>
#include "numpy/arrayobject.h"
#include <stdlib.h>
#include <math.h> /* sin, cos, sqrt, pow, fabs, copysign, atan2, fmod, log */
#if defined (_OPENMP)
# include <omp.h>
#endif

/* This file contains a C implementation of the calc_OFTI method and its
   submethods:
   - eccentricity_anomaly
   - mikkola_solve
   - danbySolve
   - calc_accel
   - scale_and_rotate
   - calc_XYZ
   - calc_OFTI
   Only the calc_OFTI method can interface with a Python program; the others
   are internal helper functions based on the original Python version. This C
   module provides OpenMP support. The only interface difference is that
   calc_OFTI returns a single numpy array containing all output variables
   instead of a tuple. */

/* To emulate the return tuple of Python, fourDouble is a structure of four
   doubles. */
typedef struct fourDouble {
    double one, two, three, four;
} four_double;

/* Kepler's equation residual E - e*sin(E) - M -- C version */
double eccentricity_anomaly(double E, double e, double M)
{
    return E - (e*sin(E)) - M;
}

/* Mikkola solver -- C version */
double mikkola_solve(double M, double e)
{
    double alpha = (1.0-e)/((4.*e)+0.5);
    double beta = (0.5*M)/((4.*e)+0.5);
    double ab = sqrt(pow(beta,2.0)+pow(alpha,3.0));
    double z = pow(fabs(beta+ab),1./3.);
    double s1 = z-alpha/z;
    double ds = -0.078*(pow(s1,5.0))/(1+e);
    double s = s1+ds;
    double E0 = M + e*(3.0*s-4.0*pow(s,3.0));
    double sinE = sin(E0);
    double cosE = cos(E0);
    double f = E0-e*sinE-M;
    double fp = 1. - e*cosE;
    double fpp = e*sinE;
    double fppp = e*cosE;
    double fpppp = -fpp;
    double dx1 = -f/fp;
    double dx2 = -f/(fp+0.5*fpp*dx1);
    double dx3 = -f/(fp+0.5*fpp*dx2 + (1./6.)*fppp*(dx2*dx2));
    double dx4 = -f/(fp+0.5*fpp*dx3 + (1./6.)*fppp*(dx3*dx3)+(1./24.)*(fpppp)*pow(dx3,3.0));
    return E0+dx4;
}

/* Newton-Raphson eccentricity anomaly solver ("Danby" method) -- C version.
   Note: an earlier draft re-declared delta_D inside the loop, shadowing this
   outer copy, which is why the loop condition once had to read a separate
   `iterator` variable; assigning to the outer delta_D removes that
   workaround without changing the iteration sequence. */
double danbySolve(double M0, double e, double h)
{
    int maxnum = 50;
    double delta_D = 1.;
    double k = 0.85;
    double E0 = M0 + copysign(1.0,sin(M0))*k*e;
    double lastE = E0;
    double nextE = lastE + 10.0*h;
    int number = 0;
    while ((delta_D > h) && number < maxnum+1) {
        double ex = eccentricity_anomaly(nextE,e,M0);
        double ep = (1.0-e*cos(lastE));
        double epp = e*sin(lastE);
        double eppp = e*cos(lastE);
        lastE = nextE;
        double delta_N = -ex/ep;
        double delta_H = -ex/(ep+0.5*epp*delta_N);
        delta_D = -ex/(ep+0.5*epp*delta_H+(1./6.)*eppp*delta_H*delta_H);
        nextE = lastE + delta_D;
        number += 1;
        if (number >= maxnum) {
            nextE = mikkola_solve(M0,e);
        }
    }
    return nextE;
}

/* Calculate acceleration -- C version. Only .one/.two/.three of the returned
   struct are populated; .four is unused by callers. */
four_double *calc_accel(double a, double T, double to, double e, double i,
                        double w, double O, double date, double dist, double E)
{
    double a_km = a*dist*149598073.0;
    double n = (2.0*M_PI)/T;
    double f1 = sqrt(1.0+e)*sin(E/2.0);
    double f2 = sqrt(1.0-e)*cos(E/2.0);
    double f = 2.*atan2(f1,f2);
    double r = (a_km*(1-e*e))/(1+e*cos(f));
    double Edot = n/(1-e*cos(E));
    double rdot = e*sin(f)*((n*a_km)/(sqrt(1-e*e)));
    double fdot = (n*(1+e*cos(f)))/(1-e*e)*sin(f)/sin(E);
    double Eddot = ((-n*e*sin(f))/(1-e*e))*fdot;
    double rddot = a_km*e*cos(E)*(Edot*Edot)+a_km*e*sin(E)*Eddot;
    double fddot = Eddot*(sin(f)/sin(E))-(Edot*Edot)*(e*sin(f)/(1-e*cos(E)));
    double Xddot = (rddot - r*fdot*fdot)*(cos(O)*cos(w+f) - sin(O)*sin(w+f)*cos(i))
                 + (-2*rdot*fdot - r*fddot)*(cos(O)*sin(w+f) + sin(O)*cos(w+f)*cos(i));
    double Yddot = (rddot - r*fdot*fdot)*(sin(O)*cos(w+f) + cos(O)*sin(w+f)*cos(i))
                 + (2*rdot*fdot + r*fddot)*(sin(O)*sin(w+f) + cos(O)*cos(w+f)*cos(i));
    double Zddot = sin(i)*((rddot - r*(fdot*fdot))*sin(w+f) + ((2*rdot*fdot + r*fddot)*cos(w+f)));
    four_double* return_tuple = (four_double*)malloc(sizeof(four_double));
    double constantMultiply = 1000./(60.*60.*24.*365.);
    return_tuple->one = Xddot*constantMultiply;
    return_tuple->two = Yddot*constantMultiply;
    return_tuple->three = Zddot*constantMultiply;
    return return_tuple;
}

/* Uniform draw in (0, 1]; never returns 0, so log(rand_gen()) stays finite. */
double rand_gen()
{
    return ((double)(rand()) + 1.)/((double)(RAND_MAX) + 1.);
}
/* Scale and rotate -- C version. Note: rand()/rand_gen() use the global
   rand() state, which is not guaranteed to be thread-safe under the OpenMP
   loop in calcOFTI, so draws may be non-deterministic across threads. */
four_double *scale_and_rotate(double xi, double yi, double rho1, double rho2,
                              double pa1, double pa2, double ai,
                              double constantM, double m1, double dist, double d)
{
    /* Box-Muller draws; rand_gen() is used (instead of raw rand()/RAND_MAX)
       so the log() argument can never be zero. */
    double PA_rand = sqrt(-2*log(rand_gen()))*cos(2*M_PI*rand_gen())*pa2+pa1;
    double r_model = sqrt(xi*xi+yi*yi);
    double rho_rand = sqrt(-2*log(rand_gen()))*cos(2*M_PI*rand_gen())*rho2/1000.+rho1/1000.;
    //printf("\n %f",PA_rand);
    //printf("\n %f",rho_rand);
    double a2i = ai*(rho_rand/r_model);
    double a2_au = a2i*dist;
    double T2i = sqrt(pow(fabs(a2_au),3.0)/fabs(m1));
    double to2i = d-(constantM*T2i);
    double PA_model = fmod((atan2(xi,-1*yi)*180/M_PI+270),360.0);
    double O2i;
    if (PA_model < 0) {
        O2i = PA_rand-PA_model+360.0;
    }
    else {
        O2i = PA_rand-PA_model;
    }
    O2i *= M_PI/180;
    four_double* return_tuple = (four_double*)malloc(sizeof(four_double));
    return_tuple->one = a2i;
    return_tuple->two = T2i;
    return_tuple->three = to2i;
    return_tuple->four = O2i;
    return return_tuple;
}

/* Calculate XYZ -- C version */
four_double* calc_XYZ(double a, double T, double to, double e, double i,
                      double w, double O, double date)
{
    double M0 = (2*M_PI)/T*(date-to);
    double eTemp = e;
    double ETemp = danbySolve(M0,eTemp,0.001);
    double f1 = sqrt(1.+eTemp)*sin(ETemp/2.);
    double f2 = sqrt(1.-eTemp)*cos(ETemp/2.0);
    double f = 2.0*atan2(f1,f2);
    double r = a*(1.0-eTemp*eTemp)/(1.0+(eTemp*cos(f)));
    double xAdd = r*(cos(O)*cos(w+f)-sin(O)*sin(w+f)*cos(i));
    double yAdd = r*(sin(O)*cos(w+f)+cos(O)*sin(w+f)*cos(i));
    double zAdd = r*sin(w+f)*sin(i);
    four_double* return_tuple = (four_double*)malloc(sizeof(four_double));
    return_tuple->one = xAdd;
    return_tuple->two = yAdd;
    return_tuple->three = zAdd;
    return_tuple->four = ETemp;
    return return_tuple;
}

/* Calculate velocities -- C version */
four_double* calc_velocities(double a, double T, double to, double e, double i,
                             double w, double O, double date, double dist,
                             double E)
{
    double a_km = a*dist*149598073.0;
    double n = (2*M_PI)/T;
    double f1 = sqrt(1.+e)*sin(E/2.);
    double f2 = sqrt(1.-e)*cos(E/2.);
    double f = 2.*atan2(f1,f2);
    double rdot = ((n*a_km)/(sqrt(1.-e*e)))*e*sin(f);
    double rfdot = ((n*a_km)/sqrt(1.-e*e))*(1.+e*cos(f));
    double Xdot = rdot*(cos(O)*cos(w+f)-sin(O)*sin(w+f)*cos(i))
                + rfdot*(-cos(O)*sin(w+f)-sin(O)*cos(w+f)*cos(i));
    double Ydot = rdot*(sin(O)*cos(w+f)+cos(O)*sin(w+f)*cos(i))
                + rfdot*(-sin(O)*sin(w+f)+cos(O)*cos(w+f)*cos(i));
    double Zdot = ((n*a_km)/(sqrt(1-e*e)))*sin(i)*(cos(w+f)+e*cos(w));
    four_double* return_tuple = (four_double*)malloc(sizeof(four_double));
    double constantMultiply = 1./(60.*60.*24.*365.);
    return_tuple->one = Xdot*constantMultiply;
    return_tuple->two = Ydot*constantMultiply;
    return_tuple->three = Zdot*constantMultiply;
    return return_tuple;
}

PyObject* calcOFTI(PyObject *self, PyObject *args)
{
    double *a, *constant, *T, *to, *e, *i, *w, *O, *m1, *dist;
    double date;
    PyObject *rho, *pa;
    PyObject* ArrayTotalObject;
    PyObject* returnArray;
    if (!PyArg_ParseTuple(args, "OdOOO", &ArrayTotalObject, &date, &rho, &pa, &returnArray)) {
        return NULL;
    }
    /* convert to contiguous array so data can be accessed easily */
    PyArrayObject* ArrayTotal = (PyArrayObject *)PyArray_ContiguousFromObject(ArrayTotalObject, NPY_FLOAT64, 1, 2);
    double* arrayData = (double*)(ArrayTotal->data);
    int m = ArrayTotal->dimensions[1];
    PyArrayObject* numpyArrayReturn = (PyArrayObject *)PyArray_ContiguousFromObject(returnArray, NPY_FLOAT64, 1, 2);
    double* dataReturn = (double*)(numpyArrayReturn->data);
    /* set variables as subarrays of main array */
    a = &arrayData[0];
    T = &arrayData[1*m];
    constant = &arrayData[2*m];
    to = &arrayData[3*m];
    e = &arrayData[4*m];
    i = &arrayData[5*m];
    w = &arrayData[6*m];
    O = &arrayData[7*m];
    m1 = &arrayData[8*m];
    dist = &arrayData[9*m];
    /* extract values from input tuples */
    double rho1 = PyFloat_AsDouble(PyTuple_GetItem(rho, 0));
    double rho2 = PyFloat_AsDouble(PyTuple_GetItem(rho, 1));
    double pa1 = PyFloat_AsDouble(PyTuple_GetItem(pa, 0));
    double pa2 = PyFloat_AsDouble(PyTuple_GetItem(pa, 1));
    int returnD1 = 19; /* rows 0..18 of the return array are written below */
    /* allocate return array */
    //double* dataReturn = (double*)PyMem_RawCalloc((size_t)m*returnD1,(size_t)sizeof(double));
#if defined (_OPENMP)
#pragma omp parallel for
#endif
    for (int j = 0; j < m; j++) {
        four_double* calc_XYZReturn1 = calc_XYZ(a[j], T[j], to[j], e[j], i[j], w[j], O[j], date);
        four_double* sarR = scale_and_rotate(calc_XYZReturn1->one, calc_XYZReturn1->two,
                                             rho1, rho2, pa1, pa2, a[j], constant[j],
                                             m1[j], dist[j], date);
        four_double* calc_XYZReturn2 = calc_XYZ(sarR->one, sarR->two, sarR->three,
                                                e[j], i[j], w[j], sarR->four, date);
        calc_XYZReturn2->one = (calc_XYZReturn2->one)*1000.;
        calc_XYZReturn2->two = (calc_XYZReturn2->two)*1000.;
        calc_XYZReturn2->three = (calc_XYZReturn2->three)*1000.;
        four_double* calcVelocitiesR = calc_velocities(sarR->one, sarR->two, sarR->three,
                                                       e[j], i[j], w[j], sarR->four, date,
                                                       dist[j], calc_XYZReturn2->four);
        four_double* calcAccelR = calc_accel(sarR->one, sarR->two, sarR->three,
                                             e[j], i[j], w[j], sarR->four, date,
                                             dist[j], calc_XYZReturn2->four);
        /* set values of return array */
        dataReturn[0*m+j] = calc_XYZReturn2->one;     // X2
        dataReturn[1*m+j] = calc_XYZReturn2->two;     // Y2
        dataReturn[2*m+j] = calc_XYZReturn2->three;   // Z2
        dataReturn[3*m+j] = calcVelocitiesR->one;     // Xdot
        dataReturn[4*m+j] = calcVelocitiesR->two;     // Ydot
        dataReturn[5*m+j] = calcVelocitiesR->three;   // Zdot
        dataReturn[6*m+j] = calcAccelR->one;          // Xddot
        dataReturn[7*m+j] = calcAccelR->two;          // Yddot
        dataReturn[8*m+j] = calcAccelR->three;        // Zddot
        dataReturn[9*m+j] = sarR->one;                // a2
        dataReturn[10*m+j] = sarR->two;               // T2
        dataReturn[11*m+j] = constant[j];
        dataReturn[12*m+j] = sarR->three;             // to2
        dataReturn[13*m+j] = e[j];                    // e
        dataReturn[14*m+j] = i[j]*180/M_PI;           // i in degrees
        dataReturn[15*m+j] = w[j]*180/M_PI;           // w in degrees
        dataReturn[16*m+j] = (sarR->four)*180/M_PI;   // O2 in degrees
        dataReturn[17*m+j] = m1[j];                   // total mass
        dataReturn[18*m+j] = dist[j];                 // distance
        /* free four_double structs */
        free(calc_XYZReturn1);
        free(calc_XYZReturn2);
        free(sarR);
        free(calcVelocitiesR);
        free(calcAccelR);
    }
    /* drop the reference to the contiguous copy created above; the pointer
       received from PyArg_ParseTuple is borrowed and must not be decref'ed */
    Py_DECREF((PyObject*)ArrayTotal);
    return PyArray_Return(numpyArrayReturn);
}

static char calcOFTI_Cdoc[] =
    "C version of the calc_OFTI method. "
    "If this fails, use the python version with keyword python_version=True "
    "in the FitOrbit() call. "
    "Also contains internal calc_XYZ, calc_velocities, calc_accel, and "
    "scale_and_rotate methods that cannot be interfaced with python.";

static PyMethodDef calcOFTI_methods[] = {
    {"calcOFTI_C", calcOFTI, METH_VARARGS, calcOFTI_Cdoc},
    {NULL}
};

static struct PyModuleDef cFunctions = {
    PyModuleDef_HEAD_INIT,
    "cFunctions",
    calcOFTI_Cdoc,
    -1,
    calcOFTI_methods
};

PyMODINIT_FUNC PyInit_cFunctions(void)
{
    PyObject* module = PyModule_Create(&cFunctions);
    import_array();
    return module;
}
sparselu.c
/**********************************************************************************************/ /* This program is part of the Barcelona OpenMP Tasks Suite */ /* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */ /* Copyright (C) 2009 Universitat Politecnica de Catalunya */ /* */ /* This program is free software; you can redistribute it and/or modify */ /* it under the terms of the GNU General Public License as published by */ /* the Free Software Foundation; either version 2 of the License, or */ /* (at your option) any later version. */ /* */ /* This program is distributed in the hope that it will be useful, */ /* but WITHOUT ANY WARRANTY; without even the implied warranty of */ /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ /* GNU General Public License for more details. */ /* */ /* You should have received a copy of the GNU General Public License */ /* along with this program; if not, write to the Free Software */ /* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /**********************************************************************************************/ #include <stdio.h> #include <stdint.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <libgen.h> #include "bots.h" #include "sparselu.h" /*********************************************************************** * checkmat: **********************************************************************/ int checkmat (float *M, float *N) { int i, j; float r_err; for (i = 0; i < bots_arg_size_1; i++) { for (j = 0; j < bots_arg_size_1; j++) { r_err = M[i*bots_arg_size_1+j] - N[i*bots_arg_size_1+j]; if ( r_err == 0.0 ) continue; if (r_err < 0.0 ) r_err = -r_err; if ( M[i*bots_arg_size_1+j] == 0 ) { bots_message("Checking failure: A[%d][%d]=%f B[%d][%d]=%f; \n", i,j, M[i*bots_arg_size_1+j], i,j, N[i*bots_arg_size_1+j]); return FALSE; } r_err = r_err / M[i*bots_arg_size_1+j]; if(r_err > EPSILON) { bots_message("Checking failure: A[%d][%d]=%f B[%d][%d]=%f; Relative Error=%f\n", i,j, M[i*bots_arg_size_1+j], i,j, N[i*bots_arg_size_1+j], r_err); return FALSE; } } } return TRUE; } /*********************************************************************** * genmat: **********************************************************************/ void genmat (float *M[]) { int null_entry, init_val, i, j, ii, jj; float *p; int a=0,b=0; init_val = 1325; /* generating the structure */ for (ii=0; ii < bots_arg_size; ii++) { for (jj=0; jj < bots_arg_size; jj++) { /* computing null entries */ null_entry=FALSE; if ((ii<jj) && (ii%3 !=0)) null_entry = TRUE; if ((ii>jj) && (jj%3 !=0)) null_entry = TRUE; if (ii%2==1) null_entry = TRUE; if (jj%2==1) null_entry = TRUE; if (ii==jj) null_entry = FALSE; if (ii==jj-1) null_entry = FALSE; if (ii-1 == jj) null_entry = FALSE; /* allocating matrix */ if (null_entry == FALSE){ a++; M[ii*bots_arg_size+jj] = (float *) malloc(bots_arg_size_1*bots_arg_size_1*sizeof(float)); if ((M[ii*bots_arg_size+jj] == NULL)) { bots_message("Error: Out of memory\n"); exit(101); } /* initializing matrix */ p = M[ii*bots_arg_size+jj]; for (i = 0; i < bots_arg_size_1; i++) { for (j = 0; j < bots_arg_size_1; j++) { init_val = (3125 * init_val) % 65536; (*p) = (float)((init_val - 32768.0) / 16384.0); p++; } } } else { b++; M[ii*bots_arg_size+jj] = NULL; } } } bots_debug("allo = %d, no = %d, total = %d, factor = %f\n",a,b,a+b,(float)((float)a/(float)(a+b))); } /*********************************************************************** * 
print_structure: **********************************************************************/ void print_structure(char *name, float *M[]) { int ii, jj; bots_message("Structure for matrix %s @ 0x%p\n",name, M); for (ii = 0; ii < bots_arg_size; ii++) { for (jj = 0; jj < bots_arg_size; jj++) { if (M[ii*bots_arg_size+jj]!=NULL) {bots_message("x");} else bots_message(" "); } bots_message("\n"); } bots_message("\n"); } /*********************************************************************** * allocate_clean_block: **********************************************************************/ float * allocate_clean_block() { int i,j; float *p, *q; p = (float *) malloc(bots_arg_size_1*bots_arg_size_1*sizeof(float)); q=p; if (p!=NULL){ for (i = 0; i < bots_arg_size_1; i++) for (j = 0; j < bots_arg_size_1; j++){(*p)=0.0; p++;} } else { bots_message("Error: Out of memory\n"); exit (101); } return (q); } /*********************************************************************** * lu0: **********************************************************************/ void lu0(float *diag) { int i, j, k; for (k=0; k<bots_arg_size_1; k++) for (i=k+1; i<bots_arg_size_1; i++) { diag[i*bots_arg_size_1+k] = diag[i*bots_arg_size_1+k] / diag[k*bots_arg_size_1+k]; for (j=k+1; j<bots_arg_size_1; j++) diag[i*bots_arg_size_1+j] = diag[i*bots_arg_size_1+j] - diag[i*bots_arg_size_1+k] * diag[k*bots_arg_size_1+j]; } } /*********************************************************************** * bdiv: **********************************************************************/ void bdiv(float *diag, float *row) { int i, j, k; for (i=0; i<bots_arg_size_1; i++) for (k=0; k<bots_arg_size_1; k++) { row[i*bots_arg_size_1+k] = row[i*bots_arg_size_1+k] / diag[k*bots_arg_size_1+k]; for (j=k+1; j<bots_arg_size_1; j++) row[i*bots_arg_size_1+j] = row[i*bots_arg_size_1+j] - row[i*bots_arg_size_1+k]*diag[k*bots_arg_size_1+j]; } } /*********************************************************************** * bmod: **********************************************************************/ void bmod(float *row, float *col, float *inner) { int i, j, k; for (i=0; i<bots_arg_size_1; i++) for (j=0; j<bots_arg_size_1; j++) for (k=0; k<bots_arg_size_1; k++) inner[i*bots_arg_size_1+j] = inner[i*bots_arg_size_1+j] - row[i*bots_arg_size_1+k]*col[k*bots_arg_size_1+j]; } /*********************************************************************** * fwd: **********************************************************************/ void fwd(float *diag, float *col) { int i, j, k; for (j=0; j<bots_arg_size_1; j++) for (k=0; k<bots_arg_size_1; k++) for (i=k+1; i<bots_arg_size_1; i++) col[i*bots_arg_size_1+j] = col[i*bots_arg_size_1+j] - diag[i*bots_arg_size_1+k]*col[k*bots_arg_size_1+j]; } void sparselu_init (float ***pBENCH, char *pass) { *pBENCH = (float **) malloc(bots_arg_size*bots_arg_size*sizeof(float *)); genmat(*pBENCH); print_structure(pass, *pBENCH); } void sparselu_seq_call(float **BENCH) { int ii, jj, kk; for (kk=0; kk<bots_arg_size; kk++) { lu0(BENCH[kk*bots_arg_size+kk]); for (jj=kk+1; jj<bots_arg_size; jj++) if (BENCH[kk*bots_arg_size+jj] != NULL) { fwd(BENCH[kk*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj]); } for (ii=kk+1; ii<bots_arg_size; ii++) if (BENCH[ii*bots_arg_size+kk] != NULL) { bdiv (BENCH[kk*bots_arg_size+kk], BENCH[ii*bots_arg_size+kk]); } for (ii=kk+1; ii<bots_arg_size; ii++) if (BENCH[ii*bots_arg_size+kk] != NULL) for (jj=kk+1; jj<bots_arg_size; jj++) if (BENCH[kk*bots_arg_size+jj] != NULL) { if (BENCH[ii*bots_arg_size+jj]==NULL) BENCH[ii*bots_arg_size+jj] = 
allocate_clean_block(); bmod(BENCH[ii*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]); } } } void sparselu_par_call(float **BENCH) { int ii, jj, kk; bots_message("Computing SparseLU Factorization (%dx%d matrix with %dx%d blocks) ", bots_arg_size,bots_arg_size,bots_arg_size_1,bots_arg_size_1); #pragma omp parallel private(kk) { for (kk=0; kk<bots_arg_size; kk++) { #pragma omp single lu0(BENCH[kk*bots_arg_size+kk]); #pragma omp for nowait for (jj=kk+1; jj<bots_arg_size; jj++) if (BENCH[kk*bots_arg_size+jj] != NULL) #pragma omp task untied firstprivate(kk, jj) shared(BENCH) { fwd(BENCH[kk*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj]); } #pragma omp for for (ii=kk+1; ii<bots_arg_size; ii++) if (BENCH[ii*bots_arg_size+kk] != NULL) #pragma omp task untied firstprivate(kk, ii) shared(BENCH) { bdiv (BENCH[kk*bots_arg_size+kk], BENCH[ii*bots_arg_size+kk]); } #pragma omp for private(jj) for (ii=kk+1; ii<bots_arg_size; ii++) if (BENCH[ii*bots_arg_size+kk] != NULL) for (jj=kk+1; jj<bots_arg_size; jj++) if (BENCH[kk*bots_arg_size+jj] != NULL) #pragma omp task untied firstprivate(kk, jj, ii) shared(BENCH) { if (BENCH[ii*bots_arg_size+jj]==NULL) BENCH[ii*bots_arg_size+jj] = allocate_clean_block(); bmod(BENCH[ii*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]); } } } bots_message(" completed!\n"); } void sparselu_fini (float **BENCH, char *pass) { print_structure(pass, BENCH); } int sparselu_check(float **SEQ, float **BENCH) { int ii,jj,ok=1; for (ii=0; ((ii<bots_arg_size) && ok); ii++) { for (jj=0; ((jj<bots_arg_size) && ok); jj++) { if ((SEQ[ii*bots_arg_size+jj] == NULL) && (BENCH[ii*bots_arg_size+jj] != NULL)) ok = FALSE; if ((SEQ[ii*bots_arg_size+jj] != NULL) && (BENCH[ii*bots_arg_size+jj] == NULL)) ok = FALSE; if ((SEQ[ii*bots_arg_size+jj] != NULL) && (BENCH[ii*bots_arg_size+jj] != NULL)) ok = checkmat(SEQ[ii*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]); } } if (ok) return BOTS_RESULT_SUCCESSFUL; else return BOTS_RESULT_UNSUCCESSFUL; }
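A sketch of the call sequence the BOTS harness drives for this kernel (editor's addition): factor the same pseudo-random sparse matrix serially and with tasks, then compare block by block. bots_arg_size and bots_arg_size_1 are assumed to have been set by the harness before these calls, as in the benchmark proper.

/* Hypothetical driver mirroring the BOTS SparseLU flow. */
void sparselu_demo(void)
{
    float **SEQ, **BENCH;
    sparselu_init(&SEQ, "serial");
    sparselu_seq_call(SEQ);
    sparselu_init(&BENCH, "parallel");
    sparselu_par_call(BENCH);
    if (sparselu_check(SEQ, BENCH) == BOTS_RESULT_SUCCESSFUL)
        bots_message("factorizations agree\n");
    sparselu_fini(SEQ, "serial");
    sparselu_fini(BENCH, "parallel");
}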
SplineC2CAdoptor.h
////////////////////////////////////////////////////////////////////////////////////// // This file is distributed under the University of Illinois/NCSA Open Source License. // See LICENSE file in top directory for details. // // Copyright (c) 2016 Jeongnim Kim and QMCPACK developers. // // File developed by: // // File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp. ////////////////////////////////////////////////////////////////////////////////////// /** @file SplineC2CSoA.h * * Adoptor classes to handle complex-to-(real,complex) with arbitrary precision */ #ifndef QMCPLUSPLUS_EINSPLINE_C2C_SOA_ADOPTOR_H #define QMCPLUSPLUS_EINSPLINE_C2C_SOA_ADOPTOR_H #include <OhmmsSoA/Container.h> #include <spline2/MultiBspline.hpp> #include <spline2/MultiBsplineEval.hpp> #include "QMCWaveFunctions/BsplineFactory/SplineAdoptorBase.h" #include <Utilities/FairDivide.h> namespace qmcplusplus { /** adoptor class to match std::complex<ST> spline with std::complex<TT> SPOs * @tparam ST precision of spline * @tparam TT precision of SPOs * @tparam D dimension * * Requires temporage storage and multiplication of phase vectors * Internal storage use double sized arrays of ST type, aligned and padded. */ template<typename ST, typename TT> struct SplineC2CSoA: public SplineAdoptorBase<ST,3> { static const int D=3; using BaseType=SplineAdoptorBase<ST,3>; using SplineType=typename bspline_traits<ST,3>::SplineType; using BCType=typename bspline_traits<ST,3>::BCType; using DataType=ST; using PointType=typename BaseType::PointType; using SingleSplineType=typename BaseType::SingleSplineType; using ComplexT=typename std::complex<TT>; using vContainer_type=Vector<ST,aligned_allocator<ST> >; using gContainer_type=VectorSoaContainer<ST,3>; using hContainer_type=VectorSoaContainer<ST,6>; using BaseType::first_spo; using BaseType::last_spo; using BaseType::GGt; using BaseType::PrimLattice; using BaseType::kPoints; using BaseType::MakeTwoCopies; using BaseType::offset; ///number of points of the original grid int BaseN[3]; ///offset of the original grid, always 0 int BaseOffset[3]; ///multi bspline set MultiBspline<ST>* SplineInst; ///expose the pointer to reuse the reader and only assigned with create_spline ///also used as identifier of shallow copy SplineType* MultiSpline; vContainer_type mKK; VectorSoaContainer<ST,3> myKcart; vContainer_type myV; vContainer_type myL; gContainer_type myG; hContainer_type myH; SplineC2CSoA(): BaseType(), SplineInst(nullptr), MultiSpline(nullptr) { this->is_complex=true; this->is_soa_ready=true; this->AdoptorName="SplineC2CSoAAdoptor"; this->KeyWord="SplineC2CSoA"; } SplineC2CSoA(const SplineC2CSoA& a): SplineAdoptorBase<ST,3>(a),SplineInst(a.SplineInst),MultiSpline(nullptr), mKK(a.mKK), myKcart(a.myKcart) { const size_t n=a.myL.size(); myV.resize(n); myG.resize(n); myL.resize(n); myH.resize(n); } ~SplineC2CSoA() { if(MultiSpline != nullptr) delete SplineInst; } inline void resizeStorage(size_t n, size_t nvals) { BaseType::init_base(n); size_t npad=getAlignedSize<ST>(2*n); myV.resize(npad); myG.resize(npad); myL.resize(npad); myH.resize(npad); } void bcast_tables(Communicate* comm) { chunked_bcast(comm, MultiSpline); } void gather_tables(Communicate* comm) { if(comm->size()==1) return; const int Nbands = kPoints.size(); const int Nbandgroups = comm->size(); offset.resize(Nbandgroups+1,0); FairDivideLow(Nbands,Nbandgroups,offset); for(size_t ib=0; ib<offset.size(); ib++) offset[ib]*=2; gatherv(comm, MultiSpline, MultiSpline->z_stride, offset); } template<typename GT, typename 
BCT> void create_spline(GT& xyz_g, BCT& xyz_bc) { resize_kpoints(); SplineInst=new MultiBspline<ST>(); SplineInst->create(xyz_g,xyz_bc,myV.size()); MultiSpline=SplineInst->spline_m; for(size_t i=0; i<D; ++i) { BaseOffset[i]=0; BaseN[i]=xyz_g[i].num+3; } qmc_common.memory_allocated += SplineInst->sizeInByte(); } inline void flush_zero() { SplineInst->flush_zero(); } /** remap kPoints to pack the double copy */ inline void resize_kpoints() { const size_t nk=kPoints.size(); mKK.resize(nk); myKcart.resize(nk); for(size_t i=0; i<nk; ++i) { mKK[i]=-dot(kPoints[i],kPoints[i]); myKcart(i)=kPoints[i]; } } inline void set_spline(SingleSplineType* spline_r, SingleSplineType* spline_i, int twist, int ispline, int level) { SplineInst->copy_spline(spline_r,2*ispline ,BaseOffset, BaseN); SplineInst->copy_spline(spline_i,2*ispline+1,BaseOffset, BaseN); } void set_spline(ST* restrict psi_r, ST* restrict psi_i, int twist, int ispline, int level) { Vector<ST> v_r(psi_r,0), v_i(psi_i,0); SplineInst->set(2*ispline ,v_r); SplineInst->set(2*ispline+1,v_i); } inline void set_spline_domain(SingleSplineType* spline_r, SingleSplineType* spline_i, int twist, int ispline, const int* offset_l, const int* mesh_l) { } bool read_splines(hdf_archive& h5f) { std::ostringstream o; o<<"spline_" << SplineAdoptorBase<ST,D>::MyIndex; einspline_engine<SplineType> bigtable(SplineInst->spline_m); return h5f.read(bigtable,o.str().c_str());//"spline_0"); } bool write_splines(hdf_archive& h5f) { std::ostringstream o; o<<"spline_" << SplineAdoptorBase<ST,D>::MyIndex; einspline_engine<SplineType> bigtable(SplineInst->spline_m); return h5f.write(bigtable,o.str().c_str());//"spline_0"); } template<typename VV> inline void assign_v(const PointType& r, const vContainer_type& myV, VV& psi, int first = 0, int last = -1) const { // protect last last = last<0 ? kPoints.size() : (last>kPoints.size() ? kPoints.size() : last); const ST x=r[0], y=r[1], z=r[2]; const ST* restrict kx=myKcart.data(0); const ST* restrict ky=myKcart.data(1); const ST* restrict kz=myKcart.data(2); #pragma omp simd for (size_t j=first; j<last; ++j) { ST s, c; const ST val_r=myV[2*j ]; const ST val_i=myV[2*j+1]; sincos(-(x*kx[j]+y*ky[j]+z*kz[j]),&s,&c); psi[j+first_spo] = ComplexT(val_r*c-val_i*s,val_i*c+val_r*s); } } template<typename VV> inline void evaluate_v(const ParticleSet& P, const int iat, VV& psi) { const PointType& r=P.activeR(iat); PointType ru(PrimLattice.toUnit_floor(r)); #pragma omp parallel { int first, last; FairDivideAligned(myV.size(), getAlignment<ST>(), omp_get_num_threads(), omp_get_thread_num(), first, last); spline2::evaluate3d(SplineInst->spline_m,ru,myV,first,last); assign_v(r,myV,psi,first/2,last/2); } } template<typename VM, typename VAV> inline void evaluateValues(const VirtualParticleSet& VP, VM& psiM, VAV& SPOMem) { #pragma omp parallel { int first, last; FairDivideAligned(myV.size(), getAlignment<ST>(), omp_get_num_threads(), omp_get_thread_num(), first, last); const size_t m=psiM.cols(); for(int iat=0; iat<VP.getTotalNum(); ++iat) { const PointType& r=VP.activeR(iat); PointType ru(PrimLattice.toUnit_floor(r)); Vector<ComplexT> psi(psiM[iat],m); spline2::evaluate3d(SplineInst->spline_m,ru,myV,first,last); assign_v(r,myV,psi,first/2,last/2); } } } inline size_t estimateMemory(const int nP) { return 0; } /** assign_vgl */ template<typename VV, typename GV> inline void assign_vgl(const PointType& r, VV& psi, GV& dpsi, VV& d2psi, int first = 0, int last = -1) const { // protect last last = last<0 ? kPoints.size() : (last>kPoints.size() ? 
kPoints.size() : last); constexpr ST zero(0); constexpr ST two(2); const ST g00=PrimLattice.G(0), g01=PrimLattice.G(1), g02=PrimLattice.G(2), g10=PrimLattice.G(3), g11=PrimLattice.G(4), g12=PrimLattice.G(5), g20=PrimLattice.G(6), g21=PrimLattice.G(7), g22=PrimLattice.G(8); const ST x=r[0], y=r[1], z=r[2]; const ST symGG[6]={GGt[0],GGt[1]+GGt[3],GGt[2]+GGt[6],GGt[4],GGt[5]+GGt[7],GGt[8]}; const ST* restrict k0=myKcart.data(0); const ST* restrict k1=myKcart.data(1); const ST* restrict k2=myKcart.data(2); const ST* restrict g0=myG.data(0); const ST* restrict g1=myG.data(1); const ST* restrict g2=myG.data(2); const ST* restrict h00=myH.data(0); const ST* restrict h01=myH.data(1); const ST* restrict h02=myH.data(2); const ST* restrict h11=myH.data(3); const ST* restrict h12=myH.data(4); const ST* restrict h22=myH.data(5); #pragma omp simd for (size_t j=first; j<last; ++j) { const size_t jr=j<<1; const size_t ji=jr+1; const ST kX=k0[j]; const ST kY=k1[j]; const ST kZ=k2[j]; const ST val_r=myV[jr]; const ST val_i=myV[ji]; //phase ST s, c; sincos(-(x*kX+y*kY+z*kZ),&s,&c); //dot(PrimLattice.G,myG[j]) const ST dX_r = g00*g0[jr]+g01*g1[jr]+g02*g2[jr]; const ST dY_r = g10*g0[jr]+g11*g1[jr]+g12*g2[jr]; const ST dZ_r = g20*g0[jr]+g21*g1[jr]+g22*g2[jr]; const ST dX_i = g00*g0[ji]+g01*g1[ji]+g02*g2[ji]; const ST dY_i = g10*g0[ji]+g11*g1[ji]+g12*g2[ji]; const ST dZ_i = g20*g0[ji]+g21*g1[ji]+g22*g2[ji]; // \f$\nabla \psi_r + {\bf k}\psi_i\f$ const ST gX_r=dX_r+val_i*kX; const ST gY_r=dY_r+val_i*kY; const ST gZ_r=dZ_r+val_i*kZ; const ST gX_i=dX_i-val_r*kX; const ST gY_i=dY_i-val_r*kY; const ST gZ_i=dZ_i-val_r*kZ; const ST lcart_r=SymTrace(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],symGG); const ST lcart_i=SymTrace(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],symGG); const ST lap_r=lcart_r+mKK[j]*val_r+two*(kX*dX_i+kY*dY_i+kZ*dZ_i); const ST lap_i=lcart_i+mKK[j]*val_i-two*(kX*dX_r+kY*dY_r+kZ*dZ_r); const size_t psiIndex=j+first_spo; psi[psiIndex ] = ComplexT(c*val_r-s*val_i,c*val_i+s*val_r); dpsi[psiIndex][0]= ComplexT(c*gX_r -s*gX_i, c*gX_i +s*gX_r); dpsi[psiIndex][1]= ComplexT(c*gY_r -s*gY_i, c*gY_i +s*gY_r); dpsi[psiIndex][2]= ComplexT(c*gZ_r -s*gZ_i, c*gZ_i +s*gZ_r); d2psi[psiIndex] = ComplexT(c*lap_r-s*lap_i,c*lap_i+s*lap_r); } } /** assign_vgl_from_l can be used when myL is precomputed and myV,myG,myL in cartesian */ template<typename VV, typename GV> inline void assign_vgl_from_l(const PointType& r, VV& psi, GV& dpsi, VV& d2psi) { constexpr ST two(2); const ST x=r[0], y=r[1], z=r[2]; const ST* restrict k0=myKcart.data(0); const ST* restrict k1=myKcart.data(1); const ST* restrict k2=myKcart.data(2); const ST* restrict g0=myG.data(0); const ST* restrict g1=myG.data(1); const ST* restrict g2=myG.data(2); const size_t N=last_spo-first_spo; #pragma omp simd for (size_t j=0; j<N; ++j) { const size_t jr=j<<1; const size_t ji=jr+1; const ST kX=k0[j]; const ST kY=k1[j]; const ST kZ=k2[j]; const ST val_r=myV[jr]; const ST val_i=myV[ji]; //phase ST s, c; sincos(-(x*kX+y*kY+z*kZ),&s,&c); //dot(PrimLattice.G,myG[j]) const ST dX_r = g0[jr]; const ST dY_r = g1[jr]; const ST dZ_r = g2[jr]; const ST dX_i = g0[ji]; const ST dY_i = g1[ji]; const ST dZ_i = g2[ji]; // \f$\nabla \psi_r + {\bf k}\psi_i\f$ const ST gX_r=dX_r+val_i*kX; const ST gY_r=dY_r+val_i*kY; const ST gZ_r=dZ_r+val_i*kZ; const ST gX_i=dX_i-val_r*kX; const ST gY_i=dY_i-val_r*kY; const ST gZ_i=dZ_i-val_r*kZ; const ST lap_r=myL[jr]+mKK[j]*val_r+two*(kX*dX_i+kY*dY_i+kZ*dZ_i); const ST lap_i=myL[ji]+mKK[j]*val_i-two*(kX*dX_r+kY*dY_r+kZ*dZ_r); const 
size_t psiIndex=j+first_spo; psi[psiIndex ] = ComplexT(c*val_r-s*val_i,c*val_i+s*val_r); dpsi[psiIndex][0]= ComplexT(c*gX_r -s*gX_i, c*gX_i +s*gX_r); dpsi[psiIndex][1]= ComplexT(c*gY_r -s*gY_i, c*gY_i +s*gY_r); dpsi[psiIndex][2]= ComplexT(c*gZ_r -s*gZ_i, c*gZ_i +s*gZ_r); d2psi[psiIndex] = ComplexT(c*lap_r-s*lap_i,c*lap_i+s*lap_r); } } template<typename VV, typename GV> inline void evaluate_vgl(const ParticleSet& P, const int iat, VV& psi, GV& dpsi, VV& d2psi) { const PointType& r=P.activeR(iat); PointType ru(PrimLattice.toUnit_floor(r)); #pragma omp parallel { int first, last; FairDivideAligned(myV.size(), getAlignment<ST>(), omp_get_num_threads(), omp_get_thread_num(), first, last); spline2::evaluate3d_vgh(SplineInst->spline_m,ru,myV,myG,myH,first,last); assign_vgl(r,psi,dpsi,d2psi,first/2,last/2); } } template<typename VV, typename GV, typename GGV> void assign_vgh(const PointType& r, VV& psi, GV& dpsi, GGV& grad_grad_psi, int first = 0, int last = -1) const { // protect last last = last<0 ? kPoints.size() : (last>kPoints.size() ? kPoints.size() : last); const ST g00=PrimLattice.G(0), g01=PrimLattice.G(1), g02=PrimLattice.G(2), g10=PrimLattice.G(3), g11=PrimLattice.G(4), g12=PrimLattice.G(5), g20=PrimLattice.G(6), g21=PrimLattice.G(7), g22=PrimLattice.G(8); const ST x=r[0], y=r[1], z=r[2]; const ST* restrict k0=myKcart.data(0); const ST* restrict k1=myKcart.data(1); const ST* restrict k2=myKcart.data(2); const ST* restrict g0=myG.data(0); const ST* restrict g1=myG.data(1); const ST* restrict g2=myG.data(2); const ST* restrict h00=myH.data(0); const ST* restrict h01=myH.data(1); const ST* restrict h02=myH.data(2); const ST* restrict h11=myH.data(3); const ST* restrict h12=myH.data(4); const ST* restrict h22=myH.data(5); #pragma omp simd for (size_t j=first; j<last; ++j) { int jr=j<<1; int ji=jr+1; const ST kX=k0[j]; const ST kY=k1[j]; const ST kZ=k2[j]; const ST val_r=myV[jr]; const ST val_i=myV[ji]; //phase ST s, c; sincos(-(x*kX+y*kY+z*kZ),&s,&c); //dot(PrimLattice.G,myG[j]) const ST dX_r = g00*g0[jr]+g01*g1[jr]+g02*g2[jr]; const ST dY_r = g10*g0[jr]+g11*g1[jr]+g12*g2[jr]; const ST dZ_r = g20*g0[jr]+g21*g1[jr]+g22*g2[jr]; const ST dX_i = g00*g0[ji]+g01*g1[ji]+g02*g2[ji]; const ST dY_i = g10*g0[ji]+g11*g1[ji]+g12*g2[ji]; const ST dZ_i = g20*g0[ji]+g21*g1[ji]+g22*g2[ji]; // \f$\nabla \psi_r + {\bf k}\psi_i\f$ const ST gX_r=dX_r+val_i*kX; const ST gY_r=dY_r+val_i*kY; const ST gZ_r=dZ_r+val_i*kZ; const ST gX_i=dX_i-val_r*kX; const ST gY_i=dY_i-val_r*kY; const ST gZ_i=dZ_i-val_r*kZ; const size_t psiIndex=j+first_spo; psi[psiIndex] =ComplexT(c*val_r-s*val_i,c*val_i+s*val_r); dpsi[psiIndex][0]=ComplexT(c*gX_r -s*gX_i, c*gX_i +s*gX_r); dpsi[psiIndex][1]=ComplexT(c*gY_r -s*gY_i, c*gY_i +s*gY_r); dpsi[psiIndex][2]=ComplexT(c*gZ_r -s*gZ_i, c*gZ_i +s*gZ_r); const ST h_xx_r=v_m_v(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],g00,g01,g02,g00,g01,g02)+kX*(gX_i+dX_i); const ST h_xy_r=v_m_v(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],g00,g01,g02,g10,g11,g12)+kX*(gY_i+dY_i); const ST h_xz_r=v_m_v(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],g00,g01,g02,g20,g21,g22)+kX*(gZ_i+dZ_i); const ST h_yx_r=v_m_v(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],g10,g11,g12,g00,g01,g02)+kY*(gX_i+dX_i); const ST h_yy_r=v_m_v(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],g10,g11,g12,g10,g11,g12)+kY*(gY_i+dY_i); const ST h_yz_r=v_m_v(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],g10,g11,g12,g20,g21,g22)+kY*(gZ_i+dZ_i); const ST 
h_zx_r=v_m_v(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],g20,g21,g22,g00,g01,g02)+kZ*(gX_i+dX_i); const ST h_zy_r=v_m_v(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],g20,g21,g22,g10,g11,g12)+kZ*(gY_i+dY_i); const ST h_zz_r=v_m_v(h00[jr],h01[jr],h02[jr],h11[jr],h12[jr],h22[jr],g20,g21,g22,g20,g21,g22)+kZ*(gZ_i+dZ_i); const ST h_xx_i=v_m_v(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],g00,g01,g02,g00,g01,g02)-kX*(gX_r+dX_r); const ST h_xy_i=v_m_v(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],g00,g01,g02,g10,g11,g12)-kX*(gY_r+dY_r); const ST h_xz_i=v_m_v(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],g00,g01,g02,g20,g21,g22)-kX*(gZ_r+dZ_r); const ST h_yx_i=v_m_v(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],g10,g11,g12,g00,g01,g02)-kY*(gX_r+dX_r); const ST h_yy_i=v_m_v(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],g10,g11,g12,g10,g11,g12)-kY*(gY_r+dY_r); const ST h_yz_i=v_m_v(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],g10,g11,g12,g20,g21,g22)-kY*(gZ_r+dZ_r); const ST h_zx_i=v_m_v(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],g20,g21,g22,g00,g01,g02)-kZ*(gX_r+dX_r); const ST h_zy_i=v_m_v(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],g20,g21,g22,g10,g11,g12)-kZ*(gY_r+dY_r); const ST h_zz_i=v_m_v(h00[ji],h01[ji],h02[ji],h11[ji],h12[ji],h22[ji],g20,g21,g22,g20,g21,g22)-kZ*(gZ_r+dZ_r); grad_grad_psi[psiIndex][0]=ComplexT(c*h_xx_r-s*h_xx_i, c*h_xx_i+s*h_xx_r); grad_grad_psi[psiIndex][1]=ComplexT(c*h_xy_r-s*h_xy_i, c*h_xy_i+s*h_xy_r); grad_grad_psi[psiIndex][2]=ComplexT(c*h_xz_r-s*h_xz_i, c*h_xz_i+s*h_xz_r); grad_grad_psi[psiIndex][3]=ComplexT(c*h_yx_r-s*h_yx_i, c*h_yx_i+s*h_yx_r); grad_grad_psi[psiIndex][4]=ComplexT(c*h_yy_r-s*h_yy_i, c*h_yy_i+s*h_yy_r); grad_grad_psi[psiIndex][5]=ComplexT(c*h_yz_r-s*h_yz_i, c*h_yz_i+s*h_yz_r); grad_grad_psi[psiIndex][6]=ComplexT(c*h_zx_r-s*h_zx_i, c*h_zx_i+s*h_zx_r); grad_grad_psi[psiIndex][7]=ComplexT(c*h_zy_r-s*h_zy_i, c*h_zy_i+s*h_zy_r); grad_grad_psi[psiIndex][8]=ComplexT(c*h_zz_r-s*h_zz_i, c*h_zz_i+s*h_zz_r); } } template<typename VV, typename GV, typename GGV> void evaluate_vgh(const ParticleSet& P, const int iat, VV& psi, GV& dpsi, GGV& grad_grad_psi) { const PointType& r=P.activeR(iat); PointType ru(PrimLattice.toUnit_floor(r)); #pragma omp parallel { int first, last; FairDivideAligned(myV.size(), getAlignment<ST>(), omp_get_num_threads(), omp_get_thread_num(), first, last); spline2::evaluate3d_vgh(SplineInst->spline_m,ru,myV,myG,myH,first,last); assign_vgh(r,psi,dpsi,grad_grad_psi,first/2,last/2); } } }; } #endif
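The assign_v/assign_vgl/assign_vgh loops above apply the plane-wave phase e^{-i k.r} to each complex spline value with one sincos() and four real multiplies. This standalone C99 snippet (editor's sketch, independent of QMCPACK) checks that expansion against cexp():

/* Verify (val_r*c - val_i*s, val_i*c + val_r*s) == e^{-i k.r} * (val_r + i*val_i),
 * where s = sin(-k.r) and c = cos(-k.r), exactly as in assign_v above. */
#include <complex.h>
#include <math.h>
#include <stdio.h>

int main(void)
{
    double val_r = 0.3, val_i = -0.7; /* spline value, real/imag parts */
    double kdotr = 1.234;             /* k.r for one orbital */
    double s = sin(-kdotr), c = cos(-kdotr);
    double complex fast = (val_r*c - val_i*s) + (val_i*c + val_r*s)*I;
    double complex ref = cexp(-I*kdotr) * (val_r + val_i*I);
    printf("difference: %g\n", cabs(fast - ref)); /* expect ~1e-16 */
    return 0;
}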
lighting.c
#include "image.h" #include <stdlib.h> #include <assert.h> #include <memory.h> #include <kazmath/vec3.h> static float _occlusion_scale = 1.0f; void heman_lighting_set_occlusion_scale(float s) { _occlusion_scale = s; } heman_image* heman_lighting_compute_normals(heman_image* heightmap) { assert(heightmap->nbands == 1); int width = heightmap->width; int height = heightmap->height; heman_image* result = heman_image_create(width, height, 3); HEMAN_FLOAT invh = 1.0f / height; HEMAN_FLOAT invw = 1.0f / width; int maxx = width - 1; int maxy = height - 1; kmVec3* normals = (kmVec3*) result->data; #pragma omp parallel for for (int y = 0; y < height; y++) { HEMAN_FLOAT v = y * invh; int y1 = MIN(y + 1, maxy); kmVec3 p; kmVec3 px; kmVec3 py; kmVec3* n = normals + y * width; for (int x = 0; x < width; x++, n++) { HEMAN_FLOAT u = x * invw; int x1 = MIN(x + 1, maxx); p.x = u; p.y = v; p.z = *heman_image_texel(heightmap, x, y); px.x = u + invw; px.y = v; px.z = *heman_image_texel(heightmap, x1, y); py.x = u; py.y = v + invh; py.z = *heman_image_texel(heightmap, x, y1); kmVec3Subtract(&px, &px, &p); kmVec3Subtract(&py, &py, &p); kmVec3Cross(n, &px, &py); kmVec3Normalize(n, n); n->y *= -1; } } return result; } heman_image* heman_lighting_apply(heman_image* heightmap, heman_image* albedo, float occlusion, float diffuse, float diffuse_softening, const float* light_position) { assert(heightmap->nbands == 1); int width = heightmap->width; int height = heightmap->height; heman_image* final = heman_image_create(width, height, 3); heman_image* normals = heman_lighting_compute_normals(heightmap); heman_image* occ = heman_lighting_compute_occlusion(heightmap); if (albedo) { assert(albedo->nbands == 3); assert(albedo->width == width); assert(albedo->height == height); } static float default_pos[] = {-0.5f, 0.5f, 1.0f}; if (!light_position) { light_position = default_pos; } kmVec3* colors = (kmVec3*) final->data; HEMAN_FLOAT invgamma = 1.0f / _gamma; kmVec3 L; L.x = light_position[0]; L.y = light_position[1]; L.z = light_position[2]; kmVec3Normalize(&L, &L); #pragma omp parallel for for (int y = 0; y < height; y++) { kmVec3* color = colors + y * width; for (int x = 0; x < width; x++, color++) { kmVec3* N = (kmVec3*) heman_image_texel(normals, x, y); kmVec3Lerp(N, N, &KM_VEC3_POS_Z, diffuse_softening); HEMAN_FLOAT df = 1 - diffuse * (1 - kmClamp(kmVec3Dot(N, &L), 0, 1)); HEMAN_FLOAT of = 1 - occlusion * (1 - *heman_image_texel(occ, x, y)); if (albedo) { *color = *((kmVec3*) heman_image_texel(albedo, x, y)); } else { color->x = color->y = color->z = 1; } color->x = pow(color->x, _gamma); color->y = pow(color->y, _gamma); color->z = pow(color->z, _gamma); kmVec3Scale(color, color, df * of); color->x = pow(color->x, invgamma); color->y = pow(color->y, invgamma); color->z = pow(color->z, invgamma); } } heman_image_destroy(normals); heman_image_destroy(occ); return final; } #define NUM_SCANS (16) #define INV_SCANS (1.0f / 16.0f) static HEMAN_FLOAT azimuth_slope(kmVec3 a, kmVec3 b) { kmVec3 d; kmVec3Subtract(&d, &a, &b); HEMAN_FLOAT x = kmVec3Length(&d); HEMAN_FLOAT y = b.z - a.z; return y / x; } static HEMAN_FLOAT compute_occlusion(kmVec3 thispt, kmVec3 horizonpt) { kmVec3 direction; kmVec3Subtract(&direction, &horizonpt, &thispt); kmVec3Normalize(&direction, &direction); HEMAN_FLOAT dot = kmVec3Dot(&direction, &KM_VEC3_POS_Z); return atan(MAX(dot, 0.0f)) * TWO_OVER_PI; } static void horizon_scan( heman_image* heightmap, heman_image* result, int* startpts, int dx, int dy) { int w = heightmap->width, h = heightmap->height; 
    int sx = SGN(dx), sy = SGN(dy);
    int ax = abs(dx), ay = abs(dy);

    // Generate the start positions for each sweep line. The start positions
    // occur just outside the image boundary.
    int nsweeps = ay * w + ax * h - (ax + ay - 1);
    int* p = startpts;
    for (int x = -ax; x < w - ax; x++) {
        for (int y = -ay; y < h - ay; y++) {
            if (x >= 0 && x < w && y >= 0 && y < h) {
                continue;
            }
            *p++ = (sx < 0) ? (w - x - 1) : x;
            *p++ = (sy < 0) ? (h - y - 1) : y;
        }
    }
    assert(nsweeps == (p - startpts) / 2);

    // Compute the number of steps by doing a mock sweep.
    int pathlen = 0;
    int i = startpts[0], j = startpts[1];
    do {
        i += dx;
        j += dy;
        ++pathlen;
    } while (i >= 0 && i < w && j >= 0 && j < h);

    // Each cell in the grid has a certain width and height. These can be
    // multiplied by row / column indices to get world-space X / Y values,
    // which are in the same coordinate system as the height values.
    HEMAN_FLOAT cellw = _occlusion_scale / MAX(w, h);
    HEMAN_FLOAT cellh = _occlusion_scale / MAX(w, h);

    // Initialize a stack of candidate horizon points, one for each sweep. In a
    // serial implementation we wouldn't need to allocate this much memory, but
    // we're trying to make life easy for multithreading.
    kmVec3* hull_buffer = malloc(sizeof(kmVec3) * pathlen * nsweeps);

    // Finally, perform the actual sweeps. We're careful to touch each pixel
    // exactly once, which makes this embarrassingly threadable.
#pragma omp parallel for
    for (int sweep = 0; sweep < nsweeps; sweep++) {
        kmVec3* convex_hull = hull_buffer + sweep * pathlen;
        int* p = startpts + sweep * 2;
        int i = p[0];
        int j = p[1];
        kmVec3 thispt, horizonpt;
        thispt.x = i * cellw;
        thispt.y = j * cellh;
        thispt.z = *heman_image_texel(heightmap, EDGE(i, w), EDGE(j, h));
        int stack_top = 0;
        convex_hull[0] = thispt;
        i += dx, j += dy;
        while (i >= 0 && i < w && j >= 0 && j < h) {
            thispt.x = i * cellw;
            thispt.y = j * cellh;
            thispt.z = *heman_image_texel(heightmap, i, j);
            while (stack_top > 0) {
                HEMAN_FLOAT s1 = azimuth_slope(thispt, convex_hull[stack_top]);
                HEMAN_FLOAT s2 = azimuth_slope(thispt, convex_hull[stack_top - 1]);
                if (s1 >= s2) {
                    break;
                }
                stack_top--;
            }
            horizonpt = convex_hull[stack_top++];
            assert(stack_top < pathlen);
            convex_hull[stack_top] = thispt;
            HEMAN_FLOAT occlusion = compute_occlusion(thispt, horizonpt);
            *heman_image_texel(result, i, j) += INV_SCANS * occlusion;
            i += dx;
            j += dy;
        }
    }
    free(hull_buffer);
}

heman_image* heman_lighting_compute_occlusion(heman_image* heightmap)
{
    assert(heightmap->nbands == 1);
    int width = heightmap->width;
    int height = heightmap->height;
    heman_image* result = heman_image_create(width, height, 1);
    memset(result->data, 0, sizeof(HEMAN_FLOAT) * width * height);

    // Define sixteen 2D vectors, used for the sweep directions.
    const int scans[NUM_SCANS * 2] = {
        1, 0, 0, 1, -1, 0, 0, -1,    // Rook
        1, 1, -1, -1, 1, -1, -1, 1,  // Bishop
        2, 1, 2, -1, -2, 1, -2, -1,
        1, 2, 1, -2, -1, 2, -1, -2   // Knight
    };

    // Allocate memory that will store the starting positions of each sweep.
    int* startpts = malloc(sizeof(int) * 2 * 3 * kmMax(width, height));

    // Make each sweep serially, accumulating the result.
    for (int i = 0; i < NUM_SCANS; i++) {
        int dx = scans[i * 2];
        int dy = scans[i * 2 + 1];
        horizon_scan(heightmap, result, startpts, dx, dy);
    }

    // Invert the occlusion values and make sure they are valid.
    for (int i = 0; i < width * height; i++) {
        result->data[i] = 1.0f - result->data[i];
        assert(result->data[i] >= 0.0 && result->data[i] <= 1.0f);
    }
    free(startpts);
    return result;
}
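A usage sketch (editor's addition, not part of the file): heman_lighting_apply computes normals and occlusion internally, so a caller only supplies a one-band heightmap, assumed produced elsewhere in the heman library; the returned image is released with heman_image_destroy.

/* Hypothetical wrapper: shade a heightmap with defaults (full occlusion and
 * diffuse weight, no normal softening, default light position). */
heman_image* shade_heightmap(heman_image* hmap)
{
    heman_lighting_set_occlusion_scale(1.0f);
    return heman_lighting_apply(hmap, NULL, 1.0f, 1.0f, 0.0f, NULL);
}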
sstruct_matrix.c
/*BHEADER********************************************************************** * Copyright (c) 2008, Lawrence Livermore National Security, LLC. * Produced at the Lawrence Livermore National Laboratory. * This file is part of HYPRE. See file COPYRIGHT for details. * * HYPRE is free software; you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License (as published by the Free * Software Foundation) version 2.1 dated February 1999. * * $Revision: 2.39 $ ***********************************************************************EHEADER*/ /****************************************************************************** * * Member functions for hypre_SStructPMatrix class. * *****************************************************************************/ #include "_hypre_sstruct_mv.h" /*========================================================================== * SStructPMatrix routines *==========================================================================*/ /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructPMatrixRef( hypre_SStructPMatrix *matrix, hypre_SStructPMatrix **matrix_ref ) { hypre_SStructPMatrixRefCount(matrix) ++; *matrix_ref = matrix; return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructPMatrixCreate( MPI_Comm comm, hypre_SStructPGrid *pgrid, hypre_SStructStencil **stencils, hypre_SStructPMatrix **pmatrix_ptr ) { hypre_SStructPMatrix *pmatrix; HYPRE_Int nvars; HYPRE_Int **smaps; hypre_StructStencil ***sstencils; hypre_StructMatrix ***smatrices; HYPRE_Int **symmetric; hypre_StructStencil *sstencil; HYPRE_Int *vars; hypre_Index *sstencil_shape; HYPRE_Int sstencil_size; HYPRE_Int new_dim; HYPRE_Int *new_sizes; hypre_Index **new_shapes; HYPRE_Int size; hypre_StructGrid *sgrid; HYPRE_Int vi, vj; HYPRE_Int i, j, k; pmatrix = hypre_TAlloc(hypre_SStructPMatrix, 1); hypre_SStructPMatrixComm(pmatrix) = comm; hypre_SStructPMatrixPGrid(pmatrix) = pgrid; hypre_SStructPMatrixStencils(pmatrix) = stencils; nvars = hypre_SStructPGridNVars(pgrid); hypre_SStructPMatrixNVars(pmatrix) = nvars; /* create sstencils */ smaps = hypre_TAlloc(HYPRE_Int *, nvars); sstencils = hypre_TAlloc(hypre_StructStencil **, nvars); new_sizes = hypre_TAlloc(HYPRE_Int, nvars); new_shapes = hypre_TAlloc(hypre_Index *, nvars); size = 0; for (vi = 0; vi < nvars; vi++) { sstencils[vi] = hypre_TAlloc(hypre_StructStencil *, nvars); for (vj = 0; vj < nvars; vj++) { sstencils[vi][vj] = NULL; new_sizes[vj] = 0; } sstencil = hypre_SStructStencilSStencil(stencils[vi]); vars = hypre_SStructStencilVars(stencils[vi]); sstencil_shape = hypre_StructStencilShape(sstencil); sstencil_size = hypre_StructStencilSize(sstencil); smaps[vi] = hypre_TAlloc(HYPRE_Int, sstencil_size); for (i = 0; i < sstencil_size; i++) { j = vars[i]; new_sizes[j]++; } for (vj = 0; vj < nvars; vj++) { if (new_sizes[vj]) { new_shapes[vj] = hypre_TAlloc(hypre_Index, new_sizes[vj]); new_sizes[vj] = 0; } } for (i = 0; i < sstencil_size; i++) { j = vars[i]; k = new_sizes[j]; hypre_CopyIndex(sstencil_shape[i], new_shapes[j][k]); smaps[vi][i] = k; new_sizes[j]++; } new_dim = hypre_StructStencilDim(sstencil); for (vj = 0; vj < nvars; vj++) { if (new_sizes[vj]) { sstencils[vi][vj] = hypre_StructStencilCreate(new_dim, new_sizes[vj], new_shapes[vj]); } size = 
hypre_max(size, new_sizes[vj]); } } hypre_SStructPMatrixSMaps(pmatrix) = smaps; hypre_SStructPMatrixSStencils(pmatrix) = sstencils; hypre_TFree(new_sizes); hypre_TFree(new_shapes); /* create smatrices */ smatrices = hypre_TAlloc(hypre_StructMatrix **, nvars); for (vi = 0; vi < nvars; vi++) { smatrices[vi] = hypre_TAlloc(hypre_StructMatrix *, nvars); for (vj = 0; vj < nvars; vj++) { smatrices[vi][vj] = NULL; if (sstencils[vi][vj] != NULL) { sgrid = hypre_SStructPGridSGrid(pgrid, vi); smatrices[vi][vj] = hypre_StructMatrixCreate(comm, sgrid, sstencils[vi][vj]); } } } hypre_SStructPMatrixSMatrices(pmatrix) = smatrices; /* create symmetric */ symmetric = hypre_TAlloc(HYPRE_Int *, nvars); for (vi = 0; vi < nvars; vi++) { symmetric[vi] = hypre_TAlloc(HYPRE_Int, nvars); for (vj = 0; vj < nvars; vj++) { symmetric[vi][vj] = 0; } } hypre_SStructPMatrixSymmetric(pmatrix) = symmetric; hypre_SStructPMatrixSEntriesSize(pmatrix) = size; hypre_SStructPMatrixSEntries(pmatrix) = hypre_TAlloc(HYPRE_Int, size); hypre_SStructPMatrixRefCount(pmatrix) = 1; *pmatrix_ptr = pmatrix; return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructPMatrixDestroy( hypre_SStructPMatrix *pmatrix ) { hypre_SStructStencil **stencils; HYPRE_Int nvars; HYPRE_Int **smaps; hypre_StructStencil ***sstencils; hypre_StructMatrix ***smatrices; HYPRE_Int **symmetric; HYPRE_Int vi, vj; if (pmatrix) { hypre_SStructPMatrixRefCount(pmatrix) --; if (hypre_SStructPMatrixRefCount(pmatrix) == 0) { stencils = hypre_SStructPMatrixStencils(pmatrix); nvars = hypre_SStructPMatrixNVars(pmatrix); smaps = hypre_SStructPMatrixSMaps(pmatrix); sstencils = hypre_SStructPMatrixSStencils(pmatrix); smatrices = hypre_SStructPMatrixSMatrices(pmatrix); symmetric = hypre_SStructPMatrixSymmetric(pmatrix); for (vi = 0; vi < nvars; vi++) { HYPRE_SStructStencilDestroy(stencils[vi]); hypre_TFree(smaps[vi]); for (vj = 0; vj < nvars; vj++) { hypre_StructStencilDestroy(sstencils[vi][vj]); hypre_StructMatrixDestroy(smatrices[vi][vj]); } hypre_TFree(sstencils[vi]); hypre_TFree(smatrices[vi]); hypre_TFree(symmetric[vi]); } hypre_TFree(stencils); hypre_TFree(smaps); hypre_TFree(sstencils); hypre_TFree(smatrices); hypre_TFree(symmetric); hypre_TFree(hypre_SStructPMatrixSEntries(pmatrix)); hypre_TFree(pmatrix); } } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructPMatrixInitialize( hypre_SStructPMatrix *pmatrix ) { HYPRE_Int nvars = hypre_SStructPMatrixNVars(pmatrix); HYPRE_Int **symmetric = hypre_SStructPMatrixSymmetric(pmatrix); HYPRE_Int num_ghost[6]; hypre_StructMatrix *smatrix; HYPRE_Int vi, vj, d, ndim; ndim = hypre_SStructPMatrixNDim(pmatrix); for (d = 0; d < ndim; d++) { num_ghost[2*d] = num_ghost[2*d+1] = 1; } for (d = ndim; d < 3; d++) { num_ghost[2*d] = num_ghost[2*d+1] = 0; } for (vi = 0; vi < nvars; vi++) { for (vj = 0; vj < nvars; vj++) { smatrix = hypre_SStructPMatrixSMatrix(pmatrix, vi, vj); if (smatrix != NULL) { HYPRE_StructMatrixSetSymmetric(smatrix, symmetric[vi][vj]); hypre_StructMatrixSetNumGhost(smatrix, num_ghost); hypre_StructMatrixInitialize(smatrix); /* needed to get AddTo accumulation correct between processors */ hypre_StructMatrixClearGhostValues(smatrix); } } } hypre_SStructPMatrixAccumulated(pmatrix) = 0; return hypre_error_flag; } 
/*-------------------------------------------------------------------------- * (action > 0): add-to values * (action = 0): set values * (action < 0): get values *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructPMatrixSetValues( hypre_SStructPMatrix *pmatrix, hypre_Index index, HYPRE_Int var, HYPRE_Int nentries, HYPRE_Int *entries, double *values, HYPRE_Int action ) { hypre_SStructStencil *stencil = hypre_SStructPMatrixStencil(pmatrix, var); HYPRE_Int *smap = hypre_SStructPMatrixSMap(pmatrix, var); HYPRE_Int *vars = hypre_SStructStencilVars(stencil); hypre_StructMatrix *smatrix; hypre_BoxArray *grid_boxes; hypre_Box *box; HYPRE_Int *sentries; HYPRE_Int i; smatrix = hypre_SStructPMatrixSMatrix(pmatrix, var, vars[entries[0]]); sentries = hypre_SStructPMatrixSEntries(pmatrix); for (i = 0; i < nentries; i++) { sentries[i] = smap[entries[i]]; } /* set values inside the grid */ hypre_StructMatrixSetValues(smatrix, index, nentries, sentries, values, action, -1, 0); /* set (AddTo/Get) or clear (Set) values outside the grid in ghost zones */ if (action != 0) { /* AddTo/Get */ hypre_SStructPGrid *pgrid = hypre_SStructPMatrixPGrid(pmatrix); hypre_Index varoffset; HYPRE_Int done = 0; grid_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(smatrix)); hypre_ForBoxI(i, grid_boxes) { box = hypre_BoxArrayBox(grid_boxes, i); if ((hypre_IndexX(index) >= hypre_BoxIMinX(box)) && (hypre_IndexX(index) <= hypre_BoxIMaxX(box)) && (hypre_IndexY(index) >= hypre_BoxIMinY(box)) && (hypre_IndexY(index) <= hypre_BoxIMaxY(box)) && (hypre_IndexZ(index) >= hypre_BoxIMinZ(box)) && (hypre_IndexZ(index) <= hypre_BoxIMaxZ(box)) ) { done = 1; break; } } if (!done) { hypre_SStructVariableGetOffset(hypre_SStructPGridVarType(pgrid, var), hypre_SStructPGridNDim(pgrid), varoffset); hypre_ForBoxI(i, grid_boxes) { box = hypre_BoxArrayBox(grid_boxes, i); if ((hypre_IndexX(index) >= hypre_BoxIMinX(box) - hypre_IndexX(varoffset)) && (hypre_IndexX(index) <= hypre_BoxIMaxX(box) + hypre_IndexX(varoffset)) && (hypre_IndexY(index) >= hypre_BoxIMinY(box) - hypre_IndexY(varoffset)) && (hypre_IndexY(index) <= hypre_BoxIMaxY(box) + hypre_IndexY(varoffset)) && (hypre_IndexZ(index) >= hypre_BoxIMinZ(box) - hypre_IndexZ(varoffset)) && (hypre_IndexZ(index) <= hypre_BoxIMaxZ(box) + hypre_IndexZ(varoffset)) ) { hypre_StructMatrixSetValues(smatrix, index, nentries, sentries, values, action, i, 1); break; } } } } else { /* Set */ grid_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(smatrix)); hypre_ForBoxI(i, grid_boxes) { box = hypre_BoxArrayBox(grid_boxes, i); if ((hypre_IndexX(index) < hypre_BoxIMinX(box)) || (hypre_IndexX(index) > hypre_BoxIMaxX(box)) || (hypre_IndexY(index) < hypre_BoxIMinY(box)) || (hypre_IndexY(index) > hypre_BoxIMaxY(box)) || (hypre_IndexZ(index) < hypre_BoxIMinZ(box)) || (hypre_IndexZ(index) > hypre_BoxIMaxZ(box)) ) { hypre_StructMatrixClearValues(smatrix, index, nentries, sentries, i, 1); } } } return hypre_error_flag; } /*-------------------------------------------------------------------------- * (action > 0): add-to values * (action = 0): set values * (action < 0): get values * (action =-2): get values and zero out *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructPMatrixSetBoxValues( hypre_SStructPMatrix *pmatrix, hypre_Index ilower, hypre_Index iupper, HYPRE_Int var, HYPRE_Int nentries, HYPRE_Int *entries, double *values, HYPRE_Int action ) { hypre_SStructStencil *stencil = hypre_SStructPMatrixStencil(pmatrix, var); 
HYPRE_Int *smap = hypre_SStructPMatrixSMap(pmatrix, var); HYPRE_Int *vars = hypre_SStructStencilVars(stencil); hypre_StructMatrix *smatrix; hypre_BoxArray *grid_boxes; hypre_Box *box; hypre_Box *value_box; HYPRE_Int *sentries; HYPRE_Int i, j; smatrix = hypre_SStructPMatrixSMatrix(pmatrix, var, vars[entries[0]]); box = hypre_BoxCreate(); hypre_CopyIndex(ilower, hypre_BoxIMin(box)); hypre_CopyIndex(iupper, hypre_BoxIMax(box)); value_box = box; sentries = hypre_SStructPMatrixSEntries(pmatrix); for (i = 0; i < nentries; i++) { sentries[i] = smap[entries[i]]; } /* set values inside the grid */ hypre_StructMatrixSetBoxValues(smatrix, box, value_box, nentries, sentries, values, action, -1, 0); /* set (AddTo/Get) or clear (Set) values outside the grid in ghost zones */ if (action != 0) { /* AddTo/Get */ hypre_SStructPGrid *pgrid = hypre_SStructPMatrixPGrid(pmatrix); hypre_Index varoffset; hypre_BoxArray *left_boxes, *done_boxes, *temp_boxes; hypre_Box *left_box, *done_box, *int_box; hypre_SStructVariableGetOffset(hypre_SStructPGridVarType(pgrid, var), hypre_SStructPGridNDim(pgrid), varoffset); grid_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(smatrix)); left_boxes = hypre_BoxArrayCreate(1); done_boxes = hypre_BoxArrayCreate(2); temp_boxes = hypre_BoxArrayCreate(0); /* done_box always points to the first box in done_boxes */ done_box = hypre_BoxArrayBox(done_boxes, 0); /* int_box always points to the second box in done_boxes */ int_box = hypre_BoxArrayBox(done_boxes, 1); hypre_CopyBox(box, hypre_BoxArrayBox(left_boxes, 0)); hypre_BoxArraySetSize(left_boxes, 1); hypre_SubtractBoxArrays(left_boxes, grid_boxes, temp_boxes); hypre_BoxArraySetSize(done_boxes, 0); hypre_ForBoxI(i, grid_boxes) { hypre_SubtractBoxArrays(left_boxes, done_boxes, temp_boxes); hypre_BoxArraySetSize(done_boxes, 1); hypre_CopyBox(hypre_BoxArrayBox(grid_boxes, i), done_box); hypre_BoxIMinX(done_box) -= hypre_IndexX(varoffset); hypre_BoxIMinY(done_box) -= hypre_IndexY(varoffset); hypre_BoxIMinZ(done_box) -= hypre_IndexZ(varoffset); hypre_BoxIMaxX(done_box) += hypre_IndexX(varoffset); hypre_BoxIMaxY(done_box) += hypre_IndexY(varoffset); hypre_BoxIMaxZ(done_box) += hypre_IndexZ(varoffset); hypre_ForBoxI(j, left_boxes) { left_box = hypre_BoxArrayBox(left_boxes, j); hypre_IntersectBoxes(left_box, done_box, int_box); hypre_StructMatrixSetBoxValues(smatrix, int_box, value_box, nentries, sentries, values, action, i, 1); } } hypre_BoxArrayDestroy(left_boxes); hypre_BoxArrayDestroy(done_boxes); hypre_BoxArrayDestroy(temp_boxes); } else { /* Set */ hypre_BoxArray *diff_boxes; hypre_Box *grid_box, *diff_box; grid_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(smatrix)); diff_boxes = hypre_BoxArrayCreate(0); hypre_ForBoxI(i, grid_boxes) { grid_box = hypre_BoxArrayBox(grid_boxes, i); hypre_BoxArraySetSize(diff_boxes, 0); hypre_SubtractBoxes(box, grid_box, diff_boxes); hypre_ForBoxI(j, diff_boxes) { diff_box = hypre_BoxArrayBox(diff_boxes, j); hypre_StructMatrixClearBoxValues(smatrix, diff_box, nentries, sentries, i, 1); } } hypre_BoxArrayDestroy(diff_boxes); } hypre_BoxDestroy(box); return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructPMatrixAccumulate( hypre_SStructPMatrix *pmatrix ) { hypre_SStructPGrid *pgrid = hypre_SStructPMatrixPGrid(pmatrix); HYPRE_Int nvars = hypre_SStructPMatrixNVars(pmatrix); HYPRE_Int ndim = hypre_SStructPGridNDim(pgrid); HYPRE_SStructVariable *vartypes 
= hypre_SStructPGridVarTypes(pgrid); hypre_StructMatrix *smatrix; hypre_Index varoffset; HYPRE_Int num_ghost[6]; hypre_StructGrid *sgrid; HYPRE_Int vi, vj, d; hypre_CommInfo *comm_info; hypre_CommPkg *comm_pkg; hypre_CommHandle *comm_handle; /* if values already accumulated, just return */ if (hypre_SStructPMatrixAccumulated(pmatrix)) { return hypre_error_flag; } for (d = ndim; d < 3; d++) { num_ghost[2*d] = num_ghost[2*d+1] = 0; } for (vi = 0; vi < nvars; vi++) { for (vj = 0; vj < nvars; vj++) { smatrix = hypre_SStructPMatrixSMatrix(pmatrix, vi, vj); if (smatrix != NULL) { sgrid = hypre_StructMatrixGrid(smatrix); /* assumes vi and vj vartypes are the same */ hypre_SStructVariableGetOffset(vartypes[vi], ndim, varoffset); for (d = 0; d < ndim; d++) { num_ghost[2*d] = num_ghost[2*d+1] = hypre_IndexD(varoffset, d); } /* accumulate values from AddTo */ hypre_CreateCommInfoFromNumGhost(sgrid, num_ghost, &comm_info); hypre_CommPkgCreate(comm_info, hypre_StructMatrixDataSpace(smatrix), hypre_StructMatrixDataSpace(smatrix), hypre_StructMatrixNumValues(smatrix), NULL, 1, hypre_StructMatrixComm(smatrix), &comm_pkg); hypre_InitializeCommunication(comm_pkg, hypre_StructMatrixData(smatrix), hypre_StructMatrixData(smatrix), 1, 0, &comm_handle); hypre_FinalizeCommunication(comm_handle); hypre_CommInfoDestroy(comm_info); hypre_CommPkgDestroy(comm_pkg); } } } hypre_SStructPMatrixAccumulated(pmatrix) = 1; return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructPMatrixAssemble( hypre_SStructPMatrix *pmatrix ) { HYPRE_Int nvars = hypre_SStructPMatrixNVars(pmatrix); hypre_StructMatrix *smatrix; HYPRE_Int vi, vj; hypre_SStructPMatrixAccumulate(pmatrix); for (vi = 0; vi < nvars; vi++) { for (vj = 0; vj < nvars; vj++) { smatrix = hypre_SStructPMatrixSMatrix(pmatrix, vi, vj); if (smatrix != NULL) { hypre_StructMatrixClearGhostValues(smatrix); hypre_StructMatrixAssemble(smatrix); } } } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructPMatrixSetSymmetric( hypre_SStructPMatrix *pmatrix, HYPRE_Int var, HYPRE_Int to_var, HYPRE_Int symmetric ) { HYPRE_Int **pmsymmetric = hypre_SStructPMatrixSymmetric(pmatrix); HYPRE_Int vstart = var; HYPRE_Int vsize = 1; HYPRE_Int tstart = to_var; HYPRE_Int tsize = 1; HYPRE_Int v, t; if (var == -1) { vstart = 0; vsize = hypre_SStructPMatrixNVars(pmatrix); } if (to_var == -1) { tstart = 0; tsize = hypre_SStructPMatrixNVars(pmatrix); } for (v = vstart; v < vsize; v++) { for (t = tstart; t < tsize; t++) { pmsymmetric[v][t] = symmetric; } } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructPMatrixPrint( const char *filename, hypre_SStructPMatrix *pmatrix, HYPRE_Int all ) { HYPRE_Int nvars = hypre_SStructPMatrixNVars(pmatrix); hypre_StructMatrix *smatrix; HYPRE_Int vi, vj; char new_filename[255]; for (vi = 0; vi < nvars; vi++) { for (vj = 0; vj < nvars; vj++) { smatrix = hypre_SStructPMatrixSMatrix(pmatrix, vi, vj); if (smatrix != NULL) { hypre_sprintf(new_filename, "%s.%02d.%02d", filename, vi, vj); hypre_StructMatrixPrint(new_filename, smatrix, all); } } } return hypre_error_flag; } 
/*========================================================================== * SStructUMatrix routines *==========================================================================*/ /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructUMatrixInitialize( hypre_SStructMatrix *matrix ) { HYPRE_IJMatrix ijmatrix = hypre_SStructMatrixIJMatrix(matrix); HYPRE_Int matrix_type = hypre_SStructMatrixObjectType(matrix); hypre_SStructGraph *graph = hypre_SStructMatrixGraph(matrix); hypre_SStructGrid *grid = hypre_SStructGraphGrid(graph); HYPRE_Int nparts = hypre_SStructGraphNParts(graph); hypre_SStructPGrid **pgrids = hypre_SStructGraphPGrids(graph); hypre_SStructStencil ***stencils = hypre_SStructGraphStencils(graph); HYPRE_Int nUventries = hypre_SStructGraphNUVEntries(graph); HYPRE_Int *iUventries = hypre_SStructGraphIUVEntries(graph); hypre_SStructUVEntry **Uventries = hypre_SStructGraphUVEntries(graph); HYPRE_Int **nvneighbors = hypre_SStructGridNVNeighbors(grid); hypre_StructGrid *sgrid; hypre_SStructStencil *stencil; HYPRE_Int *split; HYPRE_Int nvars; HYPRE_Int nrows, rowstart, nnzs ; HYPRE_Int part, var, entry, b, m, mi; HYPRE_Int *row_sizes; HYPRE_Int max_row_size; hypre_BoxArray *boxes; hypre_Box *box; hypre_Box *ghost_box; hypre_IndexRef start; hypre_Index loop_size, stride; HYPRE_IJMatrixSetObjectType(ijmatrix, HYPRE_PARCSR); /* GEC1002 the ghlocalsize is used to set the number of rows */ if (matrix_type == HYPRE_SSTRUCT || matrix_type == HYPRE_STRUCT) { rowstart = hypre_SStructGridGhstartRank(grid); nrows = hypre_SStructGridGhlocalSize(grid) ; } else /* matrix_type == HYPRE_PARCSR */ { rowstart = hypre_SStructGridStartRank(grid); nrows = hypre_SStructGridLocalSize(grid); } /* set row sizes */ m = 0; max_row_size = 0; ghost_box = hypre_BoxCreate(); row_sizes = hypre_CTAlloc(HYPRE_Int, nrows); hypre_SetIndex(stride, 1, 1, 1); for (part = 0; part < nparts; part++) { nvars = hypre_SStructPGridNVars(pgrids[part]); for (var = 0; var < nvars; var++) { sgrid = hypre_SStructPGridSGrid(pgrids[part], var); stencil = stencils[part][var]; split = hypre_SStructMatrixSplit(matrix, part, var); nnzs = 0; for (entry = 0; entry < hypre_SStructStencilSize(stencil); entry++) { if (split[entry] == -1) { nnzs++; } } #if 0 /* TODO: For now, assume stencil is full/complete */ if (hypre_SStructMatrixSymmetric(matrix)) { nnzs = 2*nnzs - 1; } #endif boxes = hypre_StructGridBoxes(sgrid); hypre_ForBoxI(b, boxes) { box = hypre_BoxArrayBox(boxes, b); hypre_CopyBox(box, ghost_box); if (matrix_type == HYPRE_SSTRUCT || matrix_type == HYPRE_STRUCT) { hypre_BoxExpand(ghost_box, hypre_StructGridNumGhost(sgrid)); } start = hypre_BoxIMin(box); hypre_BoxGetSize(box, loop_size); hypre_BoxLoop1Begin(hypre_SStructMatrixNDim(matrix), loop_size, ghost_box, start, stride, mi); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,mi ) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop1For(mi) { row_sizes[m+mi] = nnzs; } hypre_BoxLoop1End(mi); m += hypre_BoxVolume(ghost_box); } max_row_size = hypre_max(max_row_size, nnzs); if (nvneighbors[part][var]) { max_row_size = hypre_max(max_row_size, hypre_SStructStencilSize(stencil)); } } } hypre_BoxDestroy(ghost_box); /* GEC0902 essentially for each UVentry we figure out how many extra columns * we need to add to the rowsizes */ /* RDF: THREAD? 
*/ for (entry = 0; entry < nUventries; entry++) { mi = iUventries[entry]; m = hypre_SStructUVEntryRank(Uventries[mi]) - rowstart; if ((m > -1) && (m < nrows)) { row_sizes[m] += hypre_SStructUVEntryNUEntries(Uventries[mi]); max_row_size = hypre_max(max_row_size, row_sizes[m]); } } /* ZTODO: Update row_sizes based on neighbor off-part couplings */ HYPRE_IJMatrixSetRowSizes (ijmatrix, (const HYPRE_Int *) row_sizes); hypre_TFree(row_sizes); hypre_SStructMatrixTmpColCoords(matrix) = hypre_CTAlloc(HYPRE_Int, max_row_size); hypre_SStructMatrixTmpCoeffs(matrix) = hypre_CTAlloc(double, max_row_size); /* GEC1002 at this point the processor has the partitioning (creation of ij) */ HYPRE_IJMatrixInitialize(ijmatrix); return hypre_error_flag; } /*-------------------------------------------------------------------------- * (action > 0): add-to values * (action = 0): set values * (action < 0): get values * * 9/09 - AB: modified to use the box manager - here we need to check the * neighbor box manager also *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructUMatrixSetValues( hypre_SStructMatrix *matrix, HYPRE_Int part, hypre_Index index, HYPRE_Int var, HYPRE_Int nentries, HYPRE_Int *entries, double *values, HYPRE_Int action ) { HYPRE_IJMatrix ijmatrix = hypre_SStructMatrixIJMatrix(matrix); hypre_SStructGraph *graph = hypre_SStructMatrixGraph(matrix); hypre_SStructGrid *grid = hypre_SStructGraphGrid(graph); hypre_SStructGrid *dom_grid = hypre_SStructGraphDomainGrid(graph); hypre_SStructStencil *stencil = hypre_SStructGraphStencil(graph, part, var); HYPRE_Int *vars = hypre_SStructStencilVars(stencil); hypre_Index *shape = hypre_SStructStencilShape(stencil); HYPRE_Int size = hypre_SStructStencilSize(stencil); hypre_IndexRef offset; hypre_Index to_index; hypre_SStructUVEntry *Uventry; hypre_BoxManEntry *boxman_entry; hypre_SStructBoxManInfo *entry_info; HYPRE_Int row_coord; HYPRE_Int *col_coords; HYPRE_Int ncoeffs; double *coeffs; HYPRE_Int i, entry, Uverank; /* GEC1002 the matrix type */ HYPRE_Int matrix_type = hypre_SStructMatrixObjectType(matrix); hypre_SStructGridFindBoxManEntry(grid, part, index, var, &boxman_entry); /* if not local, check neighbors */ if (boxman_entry == NULL) hypre_SStructGridFindNborBoxManEntry(grid, part, index, var, &boxman_entry); if (boxman_entry == NULL) { hypre_error_in_arg(1); hypre_error_in_arg(2); hypre_error_in_arg(3); return hypre_error_flag; } else { hypre_BoxManEntryGetInfo(boxman_entry, (void **) &entry_info); } /* GEC1002 get the rank using the function with the type=matrixtype*/ hypre_SStructBoxManEntryGetGlobalRank(boxman_entry, index, &row_coord, matrix_type); col_coords = hypre_SStructMatrixTmpColCoords(matrix); coeffs = hypre_SStructMatrixTmpCoeffs(matrix); ncoeffs = 0; for (i = 0; i < nentries; i++) { entry = entries[i]; if (entry < size) { /* stencil entries */ offset = shape[entry]; hypre_IndexX(to_index) = hypre_IndexX(index) + hypre_IndexX(offset); hypre_IndexY(to_index) = hypre_IndexY(index) + hypre_IndexY(offset); hypre_IndexZ(to_index) = hypre_IndexZ(index) + hypre_IndexZ(offset); hypre_SStructGridFindBoxManEntry(dom_grid, part, to_index, vars[entry], &boxman_entry); /* if not local, check neighbors */ if (boxman_entry == NULL) hypre_SStructGridFindNborBoxManEntry(dom_grid, part, to_index, vars[entry], &boxman_entry); if (boxman_entry != NULL) { hypre_SStructBoxManEntryGetGlobalRank(boxman_entry, to_index, &col_coords[ncoeffs],matrix_type); coeffs[ncoeffs] = values[i]; ncoeffs++; } } else { /* non-stencil 
entries */ entry -= size; hypre_SStructGraphGetUVEntryRank(graph, part, var, index, &Uverank); if (Uverank > -1) { Uventry = hypre_SStructGraphUVEntry(graph, Uverank); col_coords[ncoeffs] = hypre_SStructUVEntryToRank(Uventry, entry); coeffs[ncoeffs] = values[i]; ncoeffs++; } } } if (action > 0) { HYPRE_IJMatrixAddToValues(ijmatrix, 1, &ncoeffs, &row_coord, (const HYPRE_Int *) col_coords, (const double *) coeffs); } else if (action > -1) { HYPRE_IJMatrixSetValues(ijmatrix, 1, &ncoeffs, &row_coord, (const HYPRE_Int *) col_coords, (const double *) coeffs); } else { HYPRE_IJMatrixGetValues(ijmatrix, 1, &ncoeffs, &row_coord, col_coords, values); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * Note: Entries must all be of type stencil or non-stencil, but not both. * * (action > 0): add-to values * (action = 0): set values * (action < 0): get values * 9/09 - AB: modified to use the box manager- here we need to check the * neighbor box manager also *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructUMatrixSetBoxValues( hypre_SStructMatrix *matrix, HYPRE_Int part, hypre_Index ilower, hypre_Index iupper, HYPRE_Int var, HYPRE_Int nentries, HYPRE_Int *entries, double *values, HYPRE_Int action ) { HYPRE_IJMatrix ijmatrix = hypre_SStructMatrixIJMatrix(matrix); hypre_SStructGraph *graph = hypre_SStructMatrixGraph(matrix); hypre_SStructGrid *grid = hypre_SStructGraphGrid(graph); hypre_SStructGrid *dom_grid = hypre_SStructGraphDomainGrid(graph); hypre_SStructStencil *stencil = hypre_SStructGraphStencil(graph, part, var); HYPRE_Int *vars = hypre_SStructStencilVars(stencil); hypre_Index *shape = hypre_SStructStencilShape(stencil); HYPRE_Int size = hypre_SStructStencilSize(stencil); hypre_IndexRef offset; hypre_BoxManEntry **boxman_entries; HYPRE_Int nboxman_entries; hypre_BoxManEntry **boxman_to_entries; HYPRE_Int nboxman_to_entries; HYPRE_Int nrows; HYPRE_Int *ncols; HYPRE_Int *rows; HYPRE_Int *cols; double *ijvalues; hypre_Box *box; hypre_Box *to_box; hypre_Box *map_box; hypre_Box *int_box; hypre_Index index; hypre_Index rs, cs; HYPRE_Int sy, sz; HYPRE_Int row_base, col_base, val_base; HYPRE_Int e, entry, ii, jj, i, j, k; /* GEC1002 the matrix type */ HYPRE_Int matrix_type = hypre_SStructMatrixObjectType(matrix); box = hypre_BoxCreate(); /*------------------------------------------ * all stencil entries *------------------------------------------*/ if (entries[0] < size) { to_box = hypre_BoxCreate(); map_box = hypre_BoxCreate(); int_box = hypre_BoxCreate(); hypre_BoxSetExtents(box, ilower, iupper); nrows = hypre_BoxVolume(box)*nentries; ncols = hypre_CTAlloc(HYPRE_Int, nrows); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < nrows; i++) { ncols[i] = 1; } rows = hypre_CTAlloc(HYPRE_Int, nrows); cols = hypre_CTAlloc(HYPRE_Int, nrows); ijvalues = hypre_CTAlloc(double, nrows); sy = (hypre_IndexX(iupper) - hypre_IndexX(ilower) + 1); sz = (hypre_IndexY(iupper) - hypre_IndexY(ilower) + 1) * sy; hypre_SStructGridIntersect(grid, part, var, box, -1, &boxman_entries, &nboxman_entries); for (ii = 0; ii < nboxman_entries; ii++) { /* GEC1002 introducing the strides based on the type of the matrix */ hypre_SStructBoxManEntryGetStrides(boxman_entries[ii], rs, matrix_type); hypre_BoxSetExtents(box, ilower, iupper); hypre_BoxManEntryGetExtents(boxman_entries[ii], hypre_BoxIMin(map_box), hypre_BoxIMax(map_box)); hypre_IntersectBoxes(box, map_box, int_box); 
hypre_CopyBox(int_box, box); nrows = 0; for (e = 0; e < nentries; e++) { entry = entries[e]; hypre_CopyBox(box, to_box); offset = shape[entry]; hypre_BoxIMinX(to_box) += hypre_IndexX(offset); hypre_BoxIMinY(to_box) += hypre_IndexY(offset); hypre_BoxIMinZ(to_box) += hypre_IndexZ(offset); hypre_BoxIMaxX(to_box) += hypre_IndexX(offset); hypre_BoxIMaxY(to_box) += hypre_IndexY(offset); hypre_BoxIMaxZ(to_box) += hypre_IndexZ(offset); hypre_SStructGridIntersect(dom_grid, part, vars[entry], to_box, -1, &boxman_to_entries, &nboxman_to_entries); for (jj = 0; jj < nboxman_to_entries; jj++) { /* introducing the strides based on the type of the * matrix */ hypre_SStructBoxManEntryGetStrides(boxman_to_entries[jj], cs, matrix_type); hypre_BoxManEntryGetExtents(boxman_to_entries[jj], hypre_BoxIMin(map_box), hypre_BoxIMax(map_box)); hypre_IntersectBoxes(to_box, map_box, int_box); hypre_CopyIndex(hypre_BoxIMin(int_box), index); /* GEC1002 introducing the rank based on the type of * the matrix */ hypre_SStructBoxManEntryGetGlobalRank(boxman_to_entries[jj], index, &col_base, matrix_type); hypre_IndexX(index) -= hypre_IndexX(offset); hypre_IndexY(index) -= hypre_IndexY(offset); hypre_IndexZ(index) -= hypre_IndexZ(offset); /* GEC1002 introducing the rank based on the type of * the matrix */ hypre_SStructBoxManEntryGetGlobalRank(boxman_entries[ii], index, &row_base, matrix_type); hypre_IndexX(index) -= hypre_IndexX(ilower); hypre_IndexY(index) -= hypre_IndexY(ilower); hypre_IndexZ(index) -= hypre_IndexZ(ilower); val_base = e + (hypre_IndexX(index) + hypre_IndexY(index)*sy + hypre_IndexZ(index)*sz) * nentries; /* RDF: THREAD */ for (k = 0; k < hypre_BoxSizeZ(int_box); k++) { for (j = 0; j < hypre_BoxSizeY(int_box); j++) { for (i = 0; i < hypre_BoxSizeX(int_box); i++) { rows[nrows] = row_base + i*rs[0] + j*rs[1] + k*rs[2]; cols[nrows] = col_base + i*cs[0] + j*cs[1] + k*cs[2]; ijvalues[nrows] = values[val_base + (i + j*sy + k*sz)*nentries]; nrows++; } } } } /* end loop through boxman to entries */ hypre_TFree(boxman_to_entries); } /* end of e nentries loop */ /*------------------------------------------ * set IJ values one stencil entry at a time *------------------------------------------*/ if (action > 0) { HYPRE_IJMatrixAddToValues(ijmatrix, nrows, ncols, (const HYPRE_Int *) rows, (const HYPRE_Int *) cols, (const double *) ijvalues); } else if (action > -1) { HYPRE_IJMatrixSetValues(ijmatrix, nrows, ncols, (const HYPRE_Int *) rows, (const HYPRE_Int *) cols, (const double *) ijvalues); } else { HYPRE_IJMatrixGetValues(ijmatrix, nrows, ncols, rows, cols, values); } } /* end loop through boxman entries */ hypre_TFree(boxman_entries); hypre_TFree(ncols); hypre_TFree(rows); hypre_TFree(cols); hypre_TFree(ijvalues); hypre_BoxDestroy(to_box); hypre_BoxDestroy(map_box); hypre_BoxDestroy(int_box); } /*------------------------------------------ * non-stencil entries *------------------------------------------*/ else { hypre_CopyIndex(ilower, hypre_BoxIMin(box)); hypre_CopyIndex(iupper, hypre_BoxIMax(box)); /* RDF: THREAD (Check safety on UMatrixSetValues call) */ for (k = hypre_BoxIMinZ(box); k <= hypre_BoxIMaxZ(box); k++) { for (j = hypre_BoxIMinY(box); j <= hypre_BoxIMaxY(box); j++) { for (i = hypre_BoxIMinX(box); i <= hypre_BoxIMaxX(box); i++) { hypre_SetIndex(index, i, j, k); hypre_SStructUMatrixSetValues(matrix, part, index, var, nentries, entries, values, action); values += nentries; } } } } hypre_BoxDestroy(box); return hypre_error_flag; } /*-------------------------------------------------------------------------- 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructUMatrixAssemble( hypre_SStructMatrix *matrix ) { HYPRE_IJMatrix ijmatrix = hypre_SStructMatrixIJMatrix(matrix); HYPRE_IJMatrixAssemble(ijmatrix); HYPRE_IJMatrixGetObject( ijmatrix, (void **) &hypre_SStructMatrixParCSRMatrix(matrix)); return hypre_error_flag; } /*========================================================================== * SStructMatrix routines *==========================================================================*/ /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructMatrixRef( hypre_SStructMatrix *matrix, hypre_SStructMatrix **matrix_ref ) { hypre_SStructMatrixRefCount(matrix) ++; *matrix_ref = matrix; return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructMatrixSplitEntries( hypre_SStructMatrix *matrix, HYPRE_Int part, HYPRE_Int var, HYPRE_Int nentries, HYPRE_Int *entries, HYPRE_Int *nSentries_ptr, HYPRE_Int **Sentries_ptr, HYPRE_Int *nUentries_ptr, HYPRE_Int **Uentries_ptr ) { hypre_SStructGraph *graph = hypre_SStructMatrixGraph(matrix); HYPRE_Int *split = hypre_SStructMatrixSplit(matrix, part, var); hypre_SStructStencil *stencil = hypre_SStructGraphStencil(graph, part, var); HYPRE_Int entry; HYPRE_Int i; HYPRE_Int nSentries = 0; HYPRE_Int *Sentries = hypre_SStructMatrixSEntries(matrix); HYPRE_Int nUentries = 0; HYPRE_Int *Uentries = hypre_SStructMatrixUEntries(matrix); for (i = 0; i < nentries; i++) { entry = entries[i]; if (entry < hypre_SStructStencilSize(stencil)) { /* stencil entries */ if (split[entry] > -1) { Sentries[nSentries] = split[entry]; nSentries++; } else { Uentries[nUentries] = entry; nUentries++; } } else { /* non-stencil entries */ Uentries[nUentries] = entry; nUentries++; } } *nSentries_ptr = nSentries; *Sentries_ptr = Sentries; *nUentries_ptr = nUentries; *Uentries_ptr = Uentries; return hypre_error_flag; } /*-------------------------------------------------------------------------- * (action > 0): add-to values * (action = 0): set values * (action < 0): get values * (action =-2): get values and zero out *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructMatrixSetValues( HYPRE_SStructMatrix matrix, HYPRE_Int part, HYPRE_Int *index, HYPRE_Int var, HYPRE_Int nentries, HYPRE_Int *entries, double *values, HYPRE_Int action ) { HYPRE_Int ndim = hypre_SStructMatrixNDim(matrix); hypre_SStructGraph *graph = hypre_SStructMatrixGraph(matrix); hypre_SStructGrid *grid = hypre_SStructGraphGrid(graph); HYPRE_Int **nvneighbors = hypre_SStructGridNVNeighbors(grid); HYPRE_Int *Sentries; HYPRE_Int *Uentries; HYPRE_Int nSentries; HYPRE_Int nUentries; hypre_SStructPMatrix *pmatrix; hypre_Index cindex; hypre_SStructMatrixSplitEntries(matrix, part, var, nentries, entries, &nSentries, &Sentries, &nUentries, &Uentries); hypre_CopyToCleanIndex(index, ndim, cindex); /* S-matrix */ if (nSentries > 0) { pmatrix = hypre_SStructMatrixPMatrix(matrix, part); hypre_SStructPMatrixSetValues(pmatrix, cindex, var, nSentries, Sentries, values, action); /* put inter-part couplings in UMatrix and zero them out in PMatrix * (possibly in ghost zones) */ if (nvneighbors[part][var] > 0) { hypre_SStructMatrixSetInterPartValues(matrix, part, cindex, cindex, var, nSentries, 
entries, values, action); } } /* U-matrix */ if (nUentries > 0) { hypre_SStructUMatrixSetValues(matrix, part, cindex, var, nUentries, Uentries, values, action); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * (action > 0): add-to values * (action = 0): set values * (action < 0): get values * (action =-2): get values and zero out *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructMatrixSetBoxValues( HYPRE_SStructMatrix matrix, HYPRE_Int part, HYPRE_Int *ilower, HYPRE_Int *iupper, HYPRE_Int var, HYPRE_Int nentries, HYPRE_Int *entries, double *values, HYPRE_Int action ) { HYPRE_Int ndim = hypre_SStructMatrixNDim(matrix); hypre_SStructGraph *graph = hypre_SStructMatrixGraph(matrix); hypre_SStructGrid *grid = hypre_SStructGraphGrid(graph); HYPRE_Int **nvneighbors = hypre_SStructGridNVNeighbors(grid); HYPRE_Int *Sentries; HYPRE_Int *Uentries; HYPRE_Int nSentries; HYPRE_Int nUentries; hypre_SStructPMatrix *pmatrix; hypre_Index cilower; hypre_Index ciupper; hypre_SStructMatrixSplitEntries(matrix, part, var, nentries, entries, &nSentries, &Sentries, &nUentries, &Uentries); hypre_CopyToCleanIndex(ilower, ndim, cilower); hypre_CopyToCleanIndex(iupper, ndim, ciupper); /* S-matrix */ if (nSentries > 0) { pmatrix = hypre_SStructMatrixPMatrix(matrix, part); hypre_SStructPMatrixSetBoxValues(pmatrix, cilower, ciupper, var, nSentries, Sentries, values, action); /* put inter-part couplings in UMatrix and zero them out in PMatrix * (possibly in ghost zones) */ if (nvneighbors[part][var] > 0) { hypre_SStructMatrixSetInterPartValues(matrix, part, cilower, ciupper, var, nSentries, entries, values, action); } } /* U-matrix */ if (nUentries > 0) { hypre_SStructUMatrixSetBoxValues(matrix, part, cilower, ciupper, var, nUentries, Uentries, values, action); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * Put inter-part couplings in UMatrix and zero them out in PMatrix (possibly in * ghost zones). Assumes that all entries are stencil entries. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_SStructMatrixSetInterPartValues( HYPRE_SStructMatrix matrix, HYPRE_Int part, hypre_Index ilower, hypre_Index iupper, HYPRE_Int var, HYPRE_Int nentries, HYPRE_Int *entries, double *values, HYPRE_Int action ) { hypre_SStructGraph *graph = hypre_SStructMatrixGraph(matrix); hypre_SStructGrid *grid = hypre_SStructGraphGrid(graph); hypre_SStructPMatrix *pmatrix; hypre_SStructPGrid *pgrid; hypre_SStructStencil *stencil; hypre_Index *shape; HYPRE_Int *smap; HYPRE_Int *vars, frvartype, tovartype; hypre_StructMatrix *smatrix; hypre_Box *box, *ibox0, *ibox1, *tobox, *frbox; hypre_IndexRef offset; hypre_BoxManEntry **frentries, **toentries; hypre_SStructBoxManInfo *frinfo, *toinfo; double *tvalues = NULL; HYPRE_Int nfrentries, ntoentries, frpart, topart; HYPRE_Int entry, sentry, ei, fri, toi, i, j, k, vi, tvi, vistart; HYPRE_Int vnx, vny, vnz, inx, iny, inz, idx, idy, idz; pmatrix = hypre_SStructMatrixPMatrix(matrix, part); pgrid = hypre_SStructPMatrixPGrid(pmatrix); frvartype = hypre_SStructPGridVarType(pgrid, var); box = hypre_BoxCreate(); ibox0 = hypre_BoxCreate(); ibox1 = hypre_BoxCreate(); tobox = hypre_BoxCreate(); frbox = hypre_BoxCreate(); stencil = hypre_SStructPMatrixStencil(pmatrix, var); smap = hypre_SStructPMatrixSMap(pmatrix, var); shape = hypre_SStructStencilShape(stencil); vars = hypre_SStructStencilVars(stencil); vnx = hypre_IndexX(iupper) - hypre_IndexX(ilower) + 1; vny = hypre_IndexY(iupper) - hypre_IndexY(ilower) + 1; vnz = hypre_IndexZ(iupper) - hypre_IndexZ(ilower) + 1; for (ei = 0; ei < nentries; ei++) { entry = entries[ei]; sentry = smap[entry]; offset = shape[entry]; smatrix = hypre_SStructPMatrixSMatrix(pmatrix, var, vars[entry]); tovartype = hypre_SStructPGridVarType(pgrid, vars[entry]); /* shift box in the stencil offset direction */ hypre_BoxSetExtents(box, ilower, iupper); hypre_BoxIMinX(box) += hypre_IndexX(offset); hypre_BoxIMinY(box) += hypre_IndexY(offset); hypre_BoxIMinZ(box) += hypre_IndexZ(offset); hypre_BoxIMaxX(box) += hypre_IndexX(offset); hypre_BoxIMaxY(box) += hypre_IndexY(offset); hypre_BoxIMaxZ(box) += hypre_IndexZ(offset); /* get "to" entries */ hypre_SStructGridIntersect(grid, part, vars[entry], box, -1, &toentries, &ntoentries); for (toi = 0; toi < ntoentries; toi++) { hypre_BoxManEntryGetExtents( toentries[toi], hypre_BoxIMin(tobox), hypre_BoxIMax(tobox)); hypre_IntersectBoxes(box, tobox, ibox0); if (hypre_BoxVolume(ibox0)) { hypre_SStructBoxManEntryGetPart(toentries[toi], part, &topart); /* shift ibox0 back */ hypre_BoxIMinX(ibox0) -= hypre_IndexX(offset); hypre_BoxIMinY(ibox0) -= hypre_IndexY(offset); hypre_BoxIMinZ(ibox0) -= hypre_IndexZ(offset); hypre_BoxIMaxX(ibox0) -= hypre_IndexX(offset); hypre_BoxIMaxY(ibox0) -= hypre_IndexY(offset); hypre_BoxIMaxZ(ibox0) -= hypre_IndexZ(offset); /* get "from" entries */ hypre_SStructGridIntersect(grid, part, var, ibox0, -1, &frentries, &nfrentries); for (fri = 0; fri < nfrentries; fri++) { /* don't set couplings within the same part unless possibly for * cell data (to simplify periodic conditions for users) */ hypre_SStructBoxManEntryGetPart(frentries[fri], part, &frpart); if (topart == frpart) { if ( (frvartype != HYPRE_SSTRUCT_VARIABLE_CELL) || (tovartype != HYPRE_SSTRUCT_VARIABLE_CELL) ) { continue; } hypre_BoxManEntryGetInfo(frentries[fri], (void **) &frinfo); hypre_BoxManEntryGetInfo(toentries[toi], (void **) &toinfo); if ( hypre_SStructBoxManInfoType(frinfo) == hypre_SStructBoxManInfoType(toinfo) ) { continue; } } 
hypre_BoxManEntryGetExtents( frentries[fri], hypre_BoxIMin(frbox), hypre_BoxIMax(frbox)); hypre_IntersectBoxes(ibox0, frbox, ibox1); if (hypre_BoxVolume(ibox1)) { tvalues = hypre_TReAlloc(tvalues, double, hypre_BoxVolume(ibox1)); inx = hypre_BoxIMaxX(ibox1) - hypre_BoxIMinX(ibox1) + 1; iny = hypre_BoxIMaxY(ibox1) - hypre_BoxIMinY(ibox1) + 1; inz = hypre_BoxIMaxZ(ibox1) - hypre_BoxIMinZ(ibox1) + 1; idx = hypre_BoxIMinX(ibox1) - hypre_IndexX(ilower); idy = hypre_BoxIMinY(ibox1) - hypre_IndexY(ilower); idz = hypre_BoxIMinZ(ibox1) - hypre_IndexZ(ilower); vistart = (idz*vny*vnx + idy*vnx + idx)*nentries + ei; if (action >= 0) { /* set or add */ /* RDF: THREAD */ /* copy values into tvalues */ tvi = 0; for (k = 0; k < inz; k++) { for (j = 0; j < iny; j++) { vi = vistart + (k*vny*vnx + j*vnx)*nentries; for (i = 0; i < inx; i++) { tvalues[tvi] = values[vi]; tvi += 1; vi += nentries; } } } /* put values into UMatrix */ hypre_SStructUMatrixSetBoxValues( matrix, part, hypre_BoxIMin(ibox1), hypre_BoxIMax(ibox1), var, 1, &entry, tvalues, action); /* zero out values in PMatrix (possibly in ghost) */ hypre_StructMatrixClearBoxValues( smatrix, ibox1, 1, &sentry, -1, 1); } else { /* get */ /* get values from UMatrix */ hypre_SStructUMatrixSetBoxValues( matrix, part, hypre_BoxIMin(ibox1), hypre_BoxIMax(ibox1), var, 1, &entry, tvalues, action); /* RDF: THREAD */ /* copy tvalues into values */ tvi = 0; for (k = 0; k < inz; k++) { for (j = 0; j < iny; j++) { vi = vistart + (k*vny*vnx + j*vnx)*nentries; for (i = 0; i < inx; i++) { values[vi] = tvalues[tvi]; tvi += 1; vi += nentries; } } } } /* end if action */ } /* end if nonzero ibox1 */ } /* end of "from" boxman entries loop */ hypre_TFree(frentries); } /* end if nonzero ibox0 */ } /* end of "to" boxman entries loop */ hypre_TFree(toentries); } /* end of entries loop */ hypre_BoxDestroy(box); hypre_BoxDestroy(ibox0); hypre_BoxDestroy(ibox1); hypre_BoxDestroy(tobox); hypre_BoxDestroy(frbox); hypre_TFree(tvalues); return hypre_error_flag; }
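/*--------------------------------------------------------------------------
 * Usage sketch: the (action) codes documented above are what the public
 * interface maps onto. Illustrative only -- exact signatures are declared in
 * HYPRE_sstruct_mv.h, and the values below are made up for the example.
 *
 *   HYPRE_Int index[3]   = {i, j, k};
 *   HYPRE_Int entries[1] = {0};
 *   double    values[1]  = {4.0};
 *
 *   HYPRE_SStructMatrixSetValues(A, part, index, var, 1, entries, values);
 *   HYPRE_SStructMatrixAddToValues(A, part, index, var, 1, entries, values);
 *   HYPRE_SStructMatrixGetValues(A, part, index, var, 1, entries, values);
 *
 * which reach hypre_SStructMatrixSetValues with action = 0, action > 0 and
 * action < 0, respectively.
 *--------------------------------------------------------------------------*/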
nn_index.h
/*********************************************************************** * Software License Agreement (BSD License) * * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. * * THE BSD LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *************************************************************************/ #ifndef FLANN_NNINDEX_H #define FLANN_NNINDEX_H #include <vector> #include "flann/general.h" #include "flann/util/matrix.h" #include "flann/util/params.h" #include "flann/util/result_set.h" #include "flann/util/dynamic_bitset.h" #include "flann/util/saving.h" namespace flannO { #define KNN_HEAP_THRESHOLD 250 class IndexBase { public: virtual ~IndexBase() {}; virtual size_t veclen() const = 0; virtual size_t size() const = 0; virtual flann_algorithm_t getType() const = 0; virtual int usedMemory() const = 0; virtual IndexParams getParameters() const = 0; virtual void loadIndex(FILE* stream) = 0; virtual void saveIndex(FILE* stream) = 0; }; /** * Nearest-neighbour index base class */ template <typename Distance> class NNIndex : public IndexBase { public: typedef typename Distance::ElementType ElementType; typedef typename Distance::ResultType DistanceType; NNIndex(Distance d) : distance_(d), last_id_(0), size_(0), size_at_build_(0), veclen_(0), removed_(false), removed_count_(0), data_ptr_(NULL) { } NNIndex(const IndexParams& params, Distance d) : distance_(d), last_id_(0), size_(0), size_at_build_(0), veclen_(0), index_params_(params), removed_(false), removed_count_(0), data_ptr_(NULL) { } NNIndex(const NNIndex& other) : distance_(other.distance_), last_id_(other.last_id_), size_(other.size_), size_at_build_(other.size_at_build_), veclen_(other.veclen_), index_params_(other.index_params_), removed_(other.removed_), removed_points_(other.removed_points_), removed_count_(other.removed_count_), ids_(other.ids_), points_(other.points_), data_ptr_(NULL) { if (other.data_ptr_) { data_ptr_ = new ElementType[size_*veclen_]; std::copy(other.data_ptr_, other.data_ptr_+size_*veclen_, data_ptr_); for (size_t i=0;i<size_;++i) { points_[i] = data_ptr_ + i*veclen_; } } } virtual ~NNIndex() { if (data_ptr_) { delete[] data_ptr_; } } virtual NNIndex* clone() const = 0; /** * Builds the index */ virtual void buildIndex() { freeIndex(); 
cleanRemovedPoints(); // building index buildIndexImpl(); size_at_build_ = size_; } /** * Builds th index using using the specified dataset * @param dataset the dataset to use */ virtual void buildIndex(const Matrix<ElementType>& dataset) { setDataset(dataset); this->buildIndex(); } /** * @brief Incrementally add points to the index. * @param points Matrix with points to be added * @param rebuild_threshold */ virtual void addPoints(const Matrix<ElementType>& points, float rebuild_threshold = 2) { throw FLANNException("Functionality not supported by this index"); } /** * Remove point from the index * @param index Index of point to be removed */ virtual void removePoint(size_t id) { if (!removed_) { ids_.resize(size_); for (size_t i=0;i<size_;++i) { ids_[i] = i; } removed_points_.resize(size_); removed_points_.reset(); last_id_ = size_; removed_ = true; } size_t point_index = id_to_index(id); if (point_index!=size_t(-1) && !removed_points_.test(point_index)) { removed_points_.set(point_index); removed_count_++; } } /** * Get point with specific id * @param id * @return */ virtual ElementType* getPoint(size_t id) { size_t index = id_to_index(id); if (index!=size_t(-1)) { return points_[index]; } else { return NULL; } } /** * @return number of features in this index. */ inline size_t size() const { return size_ - removed_count_; } /** * @return The dimensionality of the features in this index. */ inline size_t veclen() const { return veclen_; } /** * Returns the parameters used by the index. * * @return The index parameters */ IndexParams getParameters() const { return index_params_; } template<typename Archive> void serialize(Archive& ar) { IndexHeader header; if (Archive::is_saving::value) { header.data_type = flann_datatype_value<ElementType>::value; header.index_type = getType(); header.rows = size_; header.cols = veclen_; } ar & header; // sanity checks if (Archive::is_loading::value) { if (strcmp(header.signature,FLANN_SIGNATURE_)!=0) { throw FLANNException("Invalid index file, wrong signature"); } if (header.data_type != flann_datatype_value<ElementType>::value) { throw FLANNException("Datatype of saved index is different than of the one to be created."); } if (header.index_type != getType()) { throw FLANNException("Saved index type is different then the current index type."); } // TODO: check for distance type } ar & size_; ar & veclen_; ar & size_at_build_; bool save_dataset; if (Archive::is_saving::value) { save_dataset = get_param(index_params_,"save_dataset", false); } ar & save_dataset; if (save_dataset) { if (Archive::is_loading::value) { if (data_ptr_) { delete[] data_ptr_; } data_ptr_ = new ElementType[size_*veclen_]; points_.resize(size_); for (size_t i=0;i<size_;++i) { points_[i] = data_ptr_ + i*veclen_; } } for (size_t i=0;i<size_;++i) { ar & serialization::make_binary_object (points_[i], veclen_*sizeof(ElementType)); } } else { if (points_.size()!=size_) { throw FLANNException("Saved index does not contain the dataset and no dataset was provided."); } } ar & last_id_; ar & ids_; ar & removed_; if (removed_) { ar & removed_points_; } ar & removed_count_; } /** * @brief Perform k-nearest neighbor search * @param[in] queries The query points for which to find the nearest neighbors * @param[out] indices The indices of the nearest neighbors found * @param[out] dists Distances to the nearest neighbors found * @param[in] knn Number of nearest neighbors to return * @param[in] params Search parameters */ virtual int knnSearch(const Matrix<ElementType>& queries, Matrix<size_t>& 
indices, Matrix<DistanceType>& dists, size_t knn, const SearchParams& params) const { assert(queries.cols == veclen()); assert(indices.rows >= queries.rows); assert(dists.rows >= queries.rows); assert(indices.cols >= knn); assert(dists.cols >= knn); bool use_heap; if (params.use_heap==FLANN_Undefined) { use_heap = (knn>KNN_HEAP_THRESHOLD)?true:false; } else { use_heap = (params.use_heap==FLANN_True)?true:false; } int count = 0; if (use_heap) { #pragma omp parallel num_threads(params.cores) { KNNResultSet2<DistanceType> resultSet(knn); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = std::min(resultSet.size(), knn); resultSet.copy(indices[i], dists[i], n, params.sorted); indices_to_ids(indices[i], indices[i], n); count += n; } } } else { #pragma omp parallel num_threads(params.cores) { KNNSimpleResultSet<DistanceType> resultSet(knn); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = std::min(resultSet.size(), knn); resultSet.copy(indices[i], dists[i], n, params.sorted); indices_to_ids(indices[i], indices[i], n); count += n; } } } return count; } /** * * @param queries * @param indices * @param dists * @param knn * @param params * @return */ int knnSearch(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, size_t knn, const SearchParams& params) const { flannO::Matrix<size_t> indices_(new size_t[indices.rows*indices.cols], indices.rows, indices.cols); int result = knnSearch(queries, indices_, dists, knn, params); for (size_t i=0;i<indices.rows;++i) { for (size_t j=0;j<indices.cols;++j) { indices[i][j] = indices_[i][j]; } } delete[] indices_.ptr(); return result; } /** * @brief Perform k-nearest neighbor search * @param[in] queries The query points for which to find the nearest neighbors * @param[out] indices The indices of the nearest neighbors found * @param[out] dists Distances to the nearest neighbors found * @param[in] knn Number of nearest neighbors to return * @param[in] params Search parameters */ int knnSearch(const Matrix<ElementType>& queries, std::vector< std::vector<size_t> >& indices, std::vector<std::vector<DistanceType> >& dists, size_t knn, const SearchParams& params) const { assert(queries.cols == veclen()); bool use_heap; if (params.use_heap==FLANN_Undefined) { use_heap = (knn>KNN_HEAP_THRESHOLD)?true:false; } else { use_heap = (params.use_heap==FLANN_True)?true:false; } if (indices.size() < queries.rows ) indices.resize(queries.rows); if (dists.size() < queries.rows ) dists.resize(queries.rows); int count = 0; if (use_heap) { #pragma omp parallel num_threads(params.cores) { KNNResultSet2<DistanceType> resultSet(knn); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = std::min(resultSet.size(), knn); indices[i].resize(n); dists[i].resize(n); if (n>0) { resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted); indices_to_ids(&indices[i][0], &indices[i][0], n); } count += n; } } } else { #pragma omp parallel num_threads(params.cores) { KNNSimpleResultSet<DistanceType> resultSet(knn); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = 
std::min(resultSet.size(), knn); indices[i].resize(n); dists[i].resize(n); if (n>0) { resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted); indices_to_ids(&indices[i][0], &indices[i][0], n); } count += n; } } } return count; } /** * * @param queries * @param indices * @param dists * @param knn * @param params * @return */ int knnSearch(const Matrix<ElementType>& queries, std::vector< std::vector<int> >& indices, std::vector<std::vector<DistanceType> >& dists, size_t knn, const SearchParams& params) const { std::vector<std::vector<size_t> > indices_; int result = knnSearch(queries, indices_, dists, knn, params); indices.resize(indices_.size()); for (size_t i=0;i<indices_.size();++i) { indices[i].assign(indices_[i].begin(), indices_[i].end()); } return result; } /** * @brief Perform radius search * @param[in] query The query point * @param[out] indices The indinces of the neighbors found within the given radius * @param[out] dists The distances to the nearest neighbors found * @param[in] radius The radius used for search * @param[in] params Search parameters * @return Number of neighbors found */ int radiusSearch(const Matrix<ElementType>& queries, Matrix<size_t>& indices, Matrix<DistanceType>& dists, float radius, const SearchParams& params) const { assert(queries.cols == veclen()); int count = 0; size_t num_neighbors = std::min(indices.cols, dists.cols); int max_neighbors = params.max_neighbors; if (max_neighbors<0) max_neighbors = num_neighbors; else max_neighbors = std::min(max_neighbors,(int)num_neighbors); if (max_neighbors==0) { #pragma omp parallel num_threads(params.cores) { CountRadiusResultSet<DistanceType> resultSet(radius); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); count += resultSet.size(); } } } else { // explicitly indicated to use unbounded radius result set // and we know there'll be enough room for resulting indices and dists if (params.max_neighbors<0 && (num_neighbors>=size())) { #pragma omp parallel num_threads(params.cores) { RadiusResultSet<DistanceType> resultSet(radius); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = resultSet.size(); count += n; if (n>num_neighbors) n = num_neighbors; resultSet.copy(indices[i], dists[i], n, params.sorted); // mark the next element in the output buffers as unused if (n<indices.cols) indices[i][n] = size_t(-1); if (n<dists.cols) dists[i][n] = std::numeric_limits<DistanceType>::infinity(); indices_to_ids(indices[i], indices[i], n); } } } else { // number of neighbors limited to max_neighbors #pragma omp parallel num_threads(params.cores) { KNNRadiusResultSet<DistanceType> resultSet(radius, max_neighbors); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = resultSet.size(); count += n; if ((int)n>max_neighbors) n = max_neighbors; resultSet.copy(indices[i], dists[i], n, params.sorted); // mark the next element in the output buffers as unused if (n<indices.cols) indices[i][n] = size_t(-1); if (n<dists.cols) dists[i][n] = std::numeric_limits<DistanceType>::infinity(); indices_to_ids(indices[i], indices[i], n); } } } } return count; } /** * * @param queries * @param indices * @param dists * @param radius * @param params * @return */ int radiusSearch(const 
Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, float radius, const SearchParams& params) const { flannO::Matrix<size_t> indices_(new size_t[indices.rows*indices.cols], indices.rows, indices.cols); int result = radiusSearch(queries, indices_, dists, radius, params); for (size_t i=0;i<indices.rows;++i) { for (size_t j=0;j<indices.cols;++j) { indices[i][j] = indices_[i][j]; } } delete[] indices_.ptr(); return result; } /** * @brief Perform radius search * @param[in] query The query point * @param[out] indices The indinces of the neighbors found within the given radius * @param[out] dists The distances to the nearest neighbors found * @param[in] radius The radius used for search * @param[in] params Search parameters * @return Number of neighbors found */ int radiusSearch(const Matrix<ElementType>& queries, std::vector< std::vector<size_t> >& indices, std::vector<std::vector<DistanceType> >& dists, float radius, const SearchParams& params) const { assert(queries.cols == veclen()); int count = 0; // just count neighbors if (params.max_neighbors==0) { #pragma omp parallel num_threads(params.cores) { CountRadiusResultSet<DistanceType> resultSet(radius); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); count += resultSet.size(); } } } else { if (indices.size() < queries.rows ) indices.resize(queries.rows); if (dists.size() < queries.rows ) dists.resize(queries.rows); if (params.max_neighbors<0) { // search for all neighbors #pragma omp parallel num_threads(params.cores) { RadiusResultSet<DistanceType> resultSet(radius); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = resultSet.size(); count += n; indices[i].resize(n); dists[i].resize(n); if (n > 0) { resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted); indices_to_ids(&indices[i][0], &indices[i][0], n); } } } } else { // number of neighbors limited to max_neighbors #pragma omp parallel num_threads(params.cores) { KNNRadiusResultSet<DistanceType> resultSet(radius, params.max_neighbors); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = resultSet.size(); count += n; if ((int)n>params.max_neighbors) n = params.max_neighbors; indices[i].resize(n); dists[i].resize(n); if (n > 0) { resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted); indices_to_ids(&indices[i][0], &indices[i][0], n); } } } } } return count; } /** * * @param queries * @param indices * @param dists * @param radius * @param params * @return */ int radiusSearch(const Matrix<ElementType>& queries, std::vector< std::vector<int> >& indices, std::vector<std::vector<DistanceType> >& dists, float radius, const SearchParams& params) const { std::vector<std::vector<size_t> > indices_; int result = radiusSearch(queries, indices_, dists, radius, params); indices.resize(indices_.size()); for (size_t i=0;i<indices_.size();++i) { indices[i].assign(indices_[i].begin(), indices_[i].end()); } return result; } virtual void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams) const = 0; protected: virtual void freeIndex() = 0; virtual void buildIndexImpl() = 0; size_t id_to_index(size_t id) { if (ids_.size()==0) { return id; } size_t point_index = 
size_t(-1); if (ids_[id]==id) { return id; } else { // binary search size_t start = 0; size_t end = ids_.size(); while (start<end) { size_t mid = (start+end)/2; if (ids_[mid]==id) { point_index = mid; break; } else if (ids_[mid]<id) { start = mid + 1; } else { end = mid; } } } return point_index; } void indices_to_ids(const size_t* in, size_t* out, size_t size) const { if (removed_) { for (size_t i=0;i<size;++i) { out[i] = ids_[in[i]]; } } } void setDataset(const Matrix<ElementType>& dataset) { size_ = dataset.rows; veclen_ = dataset.cols; last_id_ = 0; ids_.clear(); removed_points_.clear(); removed_ = false; removed_count_ = 0; points_.resize(size_); for (size_t i=0;i<size_;++i) { points_[i] = dataset[i]; } } void extendDataset(const Matrix<ElementType>& new_points) { size_t new_size = size_ + new_points.rows; if (removed_) { removed_points_.resize(new_size); ids_.resize(new_size); } points_.resize(new_size); for (size_t i=size_;i<new_size;++i) { points_[i] = new_points[i-size_]; if (removed_) { ids_[i] = last_id_++; removed_points_.reset(i); } } size_ = new_size; } void cleanRemovedPoints() { if (!removed_) return; size_t last_idx = 0; for (size_t i=0;i<size_;++i) { if (!removed_points_.test(i)) { points_[last_idx] = points_[i]; ids_[last_idx] = ids_[i]; removed_points_.reset(last_idx); ++last_idx; } } points_.resize(last_idx); ids_.resize(last_idx); removed_points_.resize(last_idx); size_ = last_idx; removed_count_ = 0; } void swap(NNIndex& other) { std::swap(distance_, other.distance_); std::swap(last_id_, other.last_id_); std::swap(size_, other.size_); std::swap(size_at_build_, other.size_at_build_); std::swap(veclen_, other.veclen_); std::swap(index_params_, other.index_params_); std::swap(removed_, other.removed_); std::swap(removed_points_, other.removed_points_); std::swap(removed_count_, other.removed_count_); std::swap(ids_, other.ids_); std::swap(points_, other.points_); std::swap(data_ptr_, other.data_ptr_); } protected: /** * The distance functor */ Distance distance_; /** * Each index point has an associated ID. IDs are assigned sequentially in * increasing order. This indicates the ID assigned to the last point added to the * index. */ size_t last_id_; /** * Number of points in the index (and database) */ size_t size_; /** * Number of features in the dataset when the index was last built. */ size_t size_at_build_; /** * Size of one point in the index (and database) */ size_t veclen_; /** * Parameters of the index. 
*/ IndexParams index_params_; /** * Flag indicating if at least a point was removed from the index */ bool removed_; /** * Array used to mark points removed from the index */ DynamicBitset removed_points_; /** * Number of points removed from the index */ size_t removed_count_; /** * Array of point IDs, returned by nearest-neighbour operations */ std::vector<size_t> ids_; /** * Point data */ std::vector<ElementType*> points_; /** * Pointer to dataset memory if allocated by this index, otherwise NULL */ ElementType* data_ptr_; }; #define USING_BASECLASS_SYMBOLS \ using NNIndex<Distance>::distance_;\ using NNIndex<Distance>::size_;\ using NNIndex<Distance>::size_at_build_;\ using NNIndex<Distance>::veclen_;\ using NNIndex<Distance>::index_params_;\ using NNIndex<Distance>::removed_points_;\ using NNIndex<Distance>::ids_;\ using NNIndex<Distance>::removed_;\ using NNIndex<Distance>::points_;\ using NNIndex<Distance>::extendDataset;\ using NNIndex<Distance>::setDataset;\ using NNIndex<Distance>::cleanRemovedPoints;\ using NNIndex<Distance>::indices_to_ids; } #endif //FLANN_NNINDEX_H
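/* Usage sketch for the search interface above (illustrative; assumes the
 * Index<Distance> front end and KD-tree index that stock FLANN layers on
 * top of NNIndex -- treat the exact names as an assumption for this fork):
 *
 *   flannO::Matrix<float>  dataset(data_ptr, n_points, dim);
 *   flannO::Matrix<float>  queries(query_ptr, n_queries, dim);
 *   flannO::Matrix<size_t> indices(new size_t[n_queries*knn], n_queries, knn);
 *   flannO::Matrix<float>  dists(new float[n_queries*knn], n_queries, knn);
 *
 *   flannO::Index<flannO::L2<float> > index(dataset, flannO::KDTreeIndexParams(4));
 *   index.buildIndex();
 *
 *   flannO::SearchParams params(128);
 *   params.cores = 4;   // knnSearch() above fans the queries out with OpenMP
 *   index.knnSearch(queries, indices, dists, knn, params);
 */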
fibonacci2.c
/* OpenMP TASK Example: Computing Fibonacci Numbers */
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

int *value, *done;
omp_lock_t *dolock;

int fib(int n) {
    int i, j;
    if (n < 2) return n;
    else {
        omp_set_lock( &(dolock[n]) );
        if (!done[n]) {
            #pragma omp task shared(i) firstprivate(n)
            i = fib(n-1);
            #pragma omp task shared(j) firstprivate(n)
            j = fib(n-2);
            #pragma omp taskwait
            value[n] = i + j;
            done[n] = 1;
        }
        omp_unset_lock( &(dolock[n]) );
        return value[n];
    }
}

int main() {
    int n = 10;
    /* allocate n+1 elements so that index n itself is valid */
    value  = (int *) malloc( (n+1)*sizeof(int) );
    done   = (int *) calloc( n+1, sizeof(int) );  /* memoization flags must start at 0 */
    dolock = (omp_lock_t *) malloc( (n+1)*sizeof(omp_lock_t) );
    for (int i = 0; i <= n; i++)
        omp_init_lock( &(dolock[i]) );            /* locks must be initialized before use */

    #pragma omp parallel shared(n)
    {
        #pragma omp single
        printf ("fib(%d) = %d\n", n, fib(n));
    }

    for (int i = 0; i <= n; i++)
        omp_destroy_lock( &(dolock[i]) );
    free(value); free(done); free(dolock);
    return 0;
}
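A common variant of this example drops the locks and the memoization arrays and instead stops creating tasks below a threshold, trading redundant recomputation for much lower synchronization overhead. A minimal sketch of that variant (the names fib_task and CUTOFF are illustrative, not part of fibonacci2.c); either file builds with gcc -fopenmp and prints fib(10) = 55:

/* Task recursion with a sequential cutoff instead of memoization locks. */
#include <stdio.h>
#include <omp.h>

#define CUTOFF 8   /* below this, recurse serially to avoid task overhead */

static int fib_task(int n) {
    if (n < 2) return n;
    if (n < CUTOFF) return fib_task(n-1) + fib_task(n-2);
    int i, j;
    #pragma omp task shared(i) firstprivate(n)
    i = fib_task(n-1);
    #pragma omp task shared(j) firstprivate(n)
    j = fib_task(n-2);
    #pragma omp taskwait
    return i + j;
}

int main(void) {
    int n = 10, r;
    #pragma omp parallel
    #pragma omp single
    r = fib_task(n);          /* one thread seeds the task tree */
    printf("fib(%d) = %d\n", n, r);
    return 0;
}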
mandel-omp-for-row.c
/*
 * OpenMP Mandelbrot program (parallel for over rows)
 *
 * This program computes and displays all or part of the Mandelbrot
 * set. By default, it examines all points in the complex plane
 * that have both real and imaginary parts between -2 and 2.
 * Command-line parameters allow zooming in on a specific part of
 * this range.
 *
 * Usage:
 *   mandel [-i maxiter -c x0 y0 -s size -w windowsize]
 * where
 *   maxiter denotes the maximum number of iterations at each point -- by default 1000
 *   x0, y0, and size specify the range to examine (a square
 *     centered at (x0 + iy0) of size 2*size by 2*size -- by default,
 *     a square of size 4 by 4 centered at the origin)
 *   windowsize denotes the size of the image (display window) to compute
 *
 * Input: none, except the optional command-line arguments
 * Output: a graphical display as described in Wilkinson & Allen,
 *   displayed using the X Window system, plus text output to
 *   standard output showing the above parameters, plus execution
 *   time in seconds.
 *
 * Code based on the original code from Web site for Wilkinson and Allen's
 * text on parallel programming:
 * http://www.cs.uncc.edu/~abw/parallel/par_prog/
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <malloc.h>
#if _DISPLAY_
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/Xos.h>
#endif
#include <sys/time.h>

double getusec_() {
    struct timeval time;
    gettimeofday(&time, NULL);
    return ((double)time.tv_sec * (double)1e6 + (double)time.tv_usec);
}

#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
        stamp = stamp/1e6;\
        printf ("%s: %0.6fs\n",(_m), stamp);

/* Default values for things. */
#define N       2    /* size of problem space (x, y from -N to N) */
#define NPIXELS 800  /* size of display window in pixels */

int row, col;  // variables used to traverse the problem space

/* Structure definition for complex numbers */
typedef struct {
    double real, imag;
} complex;

#if _DISPLAY_
/* Functions for GUI */
#include "mandelbrot-gui.h"  /* has setup(), interact() */
#endif

void mandelbrot(int height, int width, double real_min, double imag_min,
                double scale_real, double scale_imag, int maxiter,
#if _DISPLAY_
                int setup_return, Display *display, Window win, GC gc,
                double scale_color, double min_color)
#else
                int ** output)
#endif
{
    /* Calculate points and save/display. The loop variable row is
     * implicitly private in the worksharing construct, but the global
     * col is shared by default and must be privatized explicitly --
     * without private(col) all threads race on it. */
    #pragma omp parallel for schedule(runtime) private(col)
    for (row = 0; row < height; ++row) {
        for (col = 0; col < width; ++col) {
            complex z, c;

            z.real = z.imag = 0;

            /* Scale display coordinates to actual region */
            c.real = real_min + ((double) col * scale_real);
            c.imag = imag_min + ((double) (height-1-row) * scale_imag);
                        /* height-1-row so y axis displays
                         * with larger values at top */

            /* Calculate z0, z1, .... until divergence or maximum iterations */
            int k = 0;
            double lengthsq, temp;
            do {
                temp = z.real*z.real - z.imag*z.imag + c.real;
                z.imag = 2*z.real*z.imag + c.imag;
                z.real = temp;
                lengthsq = z.real*z.real + z.imag*z.imag;
                ++k;
            } while (lengthsq < (N*N) && k < maxiter);

#if _DISPLAY_
            /* Scale color and display point */
            long color = (long) ((k-1) * scale_color) + min_color;
            if (setup_return == EXIT_SUCCESS) {
                #pragma omp critical
                {
                    XSetForeground (display, gc, color);
                    XDrawPoint (display, win, gc, col, row);
                }
            }
#else
            output[row][col] = k;
#endif
        }
    }
}

int main(int argc, char *argv[]) {
    int maxiter = 1000;
    double real_min;
    double real_max;
    double imag_min;
    double imag_max;
    int width  = NPIXELS;  /* dimensions of display window */
    int height = NPIXELS;
    double size = N, x0 = 0, y0 = 0;
#if _DISPLAY_
    Display *display;
    Window win;
    GC gc;
    int setup_return;
    long min_color = 0, max_color = 0;
    double scale_color;
#else
    int ** output;
    FILE *fp = NULL;
#endif
    double scale_real, scale_imag;

    /* Process command-line arguments */
    for (int i = 1; i < argc; i++) {
        if (strcmp(argv[i], "-i") == 0) {
            maxiter = atoi(argv[++i]);
        }
        else if (strcmp(argv[i], "-w") == 0) {
            width = atoi(argv[++i]);
            height = width;
        }
        else if (strcmp(argv[i], "-s") == 0) {
            size = atof(argv[++i]);
        }
#if !_DISPLAY_
        else if (strcmp(argv[i], "-o") == 0) {
            if ((fp = fopen("mandel.out", "wb")) == NULL) {
                fprintf(stderr, "Unable to open file\n");
                return EXIT_FAILURE;
            }
        }
#endif
        else if (strcmp(argv[i], "-c") == 0) {
            x0 = atof(argv[++i]);
            y0 = atof(argv[++i]);
        }
        else {
#if _DISPLAY_
            fprintf(stderr, "Usage: %s [-i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
#else
            fprintf(stderr, "Usage: %s [-o -i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
            fprintf(stderr, "       -o to write computed image to disk (default no file generated)\n");
#endif
            fprintf(stderr, "       -i to specify maximum number of iterations at each point (default 1000)\n");
#if _DISPLAY_
            fprintf(stderr, "       -w to specify the size of the display window (default 800x800 pixels)\n");
#else
            fprintf(stderr, "       -w to specify the size of the image to compute (default 800x800 elements)\n");
#endif
            fprintf(stderr, "       -c to specify the center x0+iy0 of the square to compute (default origin)\n");
            fprintf(stderr, "       -s to specify the size of the square to compute (default 2, i.e. size 4 by 4)\n");
            return EXIT_FAILURE;
        }
    }

    real_min = x0 - size;
    real_max = x0 + size;
    imag_min = y0 - size;
    imag_max = y0 + size;

    /* Produce text output */
    fprintf(stdout, "\n");
    fprintf(stdout, "Mandelbrot program\n");
    fprintf(stdout, "center = (%g, %g), size = %g\n",
            (real_max + real_min)/2, (imag_max + imag_min)/2,
            (real_max - real_min)/2);
    fprintf(stdout, "maximum iterations = %d\n", maxiter);
    fprintf(stdout, "\n");

#if _DISPLAY_
    /* Initialize for graphical display */
    setup_return = setup(width, height, &display, &win, &gc, &min_color, &max_color);
    if (setup_return != EXIT_SUCCESS) {
        fprintf(stderr, "Unable to initialize display, continuing\n");
        return EXIT_FAILURE;
    }
#else
    output = malloc(height*sizeof(int *));
    for (int row = 0; row < height; ++row)
        output[row] = malloc(width*sizeof(int));
#endif

    /* Compute factors to scale computational region to window */
    scale_real = (double) (real_max - real_min) / (double) width;
    scale_imag = (double) (imag_max - imag_min) / (double) height;

#if _DISPLAY_
    /* Compute factor for color scaling */
    scale_color = (double) (max_color - min_color) / (double) (maxiter - 1);
#endif

    /* Start timing */
    double stamp;
    START_COUNT_TIME;

#if _DISPLAY_
    mandelbrot(height, width, real_min, imag_min, scale_real, scale_imag,
               maxiter, setup_return, display, win, gc, scale_color, min_color);
#else
    mandelbrot(height, width, real_min, imag_min, scale_real, scale_imag,
               maxiter, output);
#endif

    /* End timing */
    STOP_COUNT_TIME("Total execution time");

    /* Be sure all output is written */
#if _DISPLAY_
    if (setup_return == EXIT_SUCCESS) {
        XFlush (display);
    }
#else
    if (fp != NULL) {
        for (int row = 0; row < height; ++row)
            if (fwrite(output[row], sizeof(int), width, fp) != width) {
                fprintf(stderr, "Output file not written correctly\n");
            }
        fclose(fp);
    }
#endif

#if _DISPLAY_
    /* Wait for user response, then exit program */
    if (setup_return == EXIT_SUCCESS) {
        interact(display, &win, width, height,
                 real_min, real_max, imag_min, imag_max);
    }
#endif
    return EXIT_SUCCESS;
}
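Because the row loop uses schedule(runtime), the schedule is chosen when the program runs, typically through the OMP_SCHEDULE environment variable (e.g. OMP_SCHEDULE="dynamic,4" ./mandel). Rows near the set boundary iterate far longer than rows that diverge quickly, so a dynamic schedule usually balances load better than the default static split. The same choice can also be made from code; a minimal sketch using the standard OpenMP runtime API (the function name choose_dynamic_schedule is illustrative):

/* Select the schedule applied to schedule(runtime) loops programmatically,
 * equivalent to setting OMP_SCHEDULE="dynamic,4" in the environment. */
#include <omp.h>

void choose_dynamic_schedule(void) {
    /* kind + chunk size; must be called before the parallel loop runs */
    omp_set_schedule(omp_sched_dynamic, 4);
}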
accuracy_cython.c
/* Generated by Cython 0.29.19 */ /* BEGIN: Cython Metadata { "distutils": { "extra_compile_args": [ "-fopenmp", "-ffast-math", "-march=native" ], "extra_link_args": [ "-fopenmp" ], "name": "glove.metrics.accuracy_cython", "sources": [ "glove/metrics/accuracy_cython.pyx" ] }, "module_name": "glove.metrics.accuracy_cython" } END: Cython Metadata */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) #error Cython requires Python 2.6+ or Python 3.3+. #else #define CYTHON_ABI "0_29_19" #define CYTHON_HEX_VERSION 0x001D13F0 #define CYTHON_FUTURE_DIVISION 0 #include <stddef.h> #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #define __PYX_COMMA , #ifndef HAVE_LONG_LONG #if PY_VERSION_HEX >= 0x02070000 #define HAVE_LONG_LONG #endif #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 0 #undef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 0 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #if PY_VERSION_HEX < 0x03050000 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #undef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #undef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 1 #undef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 0 #undef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 0 #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #elif defined(PYSTON_VERSION) #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define 
CYTHON_UNPACK_METHODS 1 #endif #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) #define CYTHON_USE_PYTYPE_LOOKUP 1 #endif #if PY_MAJOR_VERSION < 3 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #elif !defined(CYTHON_USE_PYLONG_INTERNALS) #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #ifndef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 1 #endif #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #if PY_VERSION_HEX < 0x030300F0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #elif !defined(CYTHON_USE_UNICODE_WRITER) #define CYTHON_USE_UNICODE_WRITER 1 #endif #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #ifndef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 1 #endif #ifndef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 1 #endif #ifndef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) #endif #ifndef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) #endif #ifndef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) #endif #ifndef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) #endif #endif #if !defined(CYTHON_FAST_PYCCALL) #define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) #endif #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #ifdef SIZEOF_VOID_P enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; #endif #endif #ifndef __has_attribute #define __has_attribute(x) 0 #endif #ifndef __has_cpp_attribute #define __has_cpp_attribute(x) 0 #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_MAYBE_UNUSED_VAR # if defined(__cplusplus) template<class T> void 
CYTHON_MAYBE_UNUSED_VAR( const T& ) { } # else # define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifdef _MSC_VER #ifndef _MSC_STDINT_H_ #if _MSC_VER < 1300 typedef unsigned char uint8_t; typedef unsigned int uint32_t; #else typedef unsigned __int8 uint8_t; typedef unsigned __int32 uint32_t; #endif #endif #else #include <stdint.h> #endif #ifndef CYTHON_FALLTHROUGH #if defined(__cplusplus) && __cplusplus >= 201103L #if __has_cpp_attribute(fallthrough) #define CYTHON_FALLTHROUGH [[fallthrough]] #elif __has_cpp_attribute(clang::fallthrough) #define CYTHON_FALLTHROUGH [[clang::fallthrough]] #elif __has_cpp_attribute(gnu::fallthrough) #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] #endif #endif #ifndef CYTHON_FALLTHROUGH #if __has_attribute(fallthrough) #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) #else #define CYTHON_FALLTHROUGH #endif #endif #if defined(__clang__ ) && defined(__apple_build_version__) #if __apple_build_version__ < 7000000 #undef CYTHON_FALLTHROUGH #define CYTHON_FALLTHROUGH #endif #endif #endif #ifndef CYTHON_INLINE #if defined(__clang__) #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) #elif defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2 #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #else #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #endif #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #ifndef METH_STACKLESS #define METH_STACKLESS 0 #endif #if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) #ifndef METH_FASTCALL #define METH_FASTCALL 0x80 #endif typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames); #else #define __Pyx_PyCFunctionFast _PyCFunctionFast #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords #endif #if CYTHON_FAST_PYCCALL #define __Pyx_PyFastCFunction_Check(func)\ ((PyCFunction_Check(func) && (METH_FASTCALL 
== (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) #else #define __Pyx_PyFastCFunction_Check(func) 0 #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 #define PyMem_RawMalloc(n) PyMem_Malloc(n) #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) #define PyMem_RawFree(p) PyMem_Free(p) #endif #if CYTHON_COMPILING_IN_PYSTON #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) #else #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) #endif #if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #elif PY_VERSION_HEX >= 0x03060000 #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() #elif PY_VERSION_HEX >= 0x03000000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #else #define __Pyx_PyThreadState_Current _PyThreadState_Current #endif #if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) #include "pythread.h" #define Py_tss_NEEDS_INIT 0 typedef int Py_tss_t; static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { *key = PyThread_create_key(); return 0; } static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); *key = Py_tss_NEEDS_INIT; return key; } static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { PyObject_Free(key); } static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { return *key != Py_tss_NEEDS_INIT; } static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { PyThread_delete_key(*key); *key = Py_tss_NEEDS_INIT; } static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { return PyThread_set_key_value(*key, value); } static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { return PyThread_get_key_value(*key); } #endif #if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) #define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n)) #else #define __Pyx_PyDict_NewPresized(n) PyDict_New() #endif #if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS #define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) #else #define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define CYTHON_PEP393_ENABLED 0 #define PyUnicode_1BYTE_KIND 1 #define PyUnicode_2BYTE_KIND 2 #define PyUnicode_4BYTE_KIND 4 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #ifndef PyObject_Unicode #define PyObject_Unicode PyObject_Str #endif #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #if CYTHON_ASSUME_SAFE_MACROS #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) #else #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) #endif #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
PyMethod_New(func, self) : (Py_INCREF(func), func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #if CYTHON_USE_ASYNC_SLOTS #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #else #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #endif #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef __Pyx_PyAsyncMethodsStruct typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) #define __Pyx_truncl trunc #else #define __Pyx_truncl truncl #endif #define __PYX_MARK_ERR_POS(f_index, lineno) \ { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; } #define __PYX_ERR(f_index, lineno, Ln_error) \ { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; } #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__glove__metrics__accuracy_cython #define __PYX_HAVE_API__glove__metrics__accuracy_cython /* Early includes */ #include "pythread.h" #include <string.h> #include <stdlib.h> #include <stdio.h> #include "pystate.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) #define CYTHON_WITHOUT_ASSERTIONS #endif typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { return (size_t) i < (size_t) limit; } #if defined (__cplusplus) && __cplusplus >= 201103L #include <cstdlib> #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) 
((value<0) ? -value : value) #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); #define __Pyx_PySequence_Tuple(obj)\ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_ASSUME_SAFE_MACROS #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? 
__Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } static PyObject *__pyx_m = NULL; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_cython_runtime = NULL; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static PyObject *__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= 
__FILE__; static const char *__pyx_filename; static const char *__pyx_f[] = { "glove/metrics/accuracy_cython.pyx", "stringsource", }; /* NoFastGil.proto */ #define __Pyx_PyGILState_Ensure PyGILState_Ensure #define __Pyx_PyGILState_Release PyGILState_Release #define __Pyx_FastGIL_Remember() #define __Pyx_FastGIL_Forget() #define __Pyx_FastGilFuncInit() /* MemviewSliceStruct.proto */ struct __pyx_memoryview_obj; typedef struct { struct __pyx_memoryview_obj *memview; char *data; Py_ssize_t shape[8]; Py_ssize_t strides[8]; Py_ssize_t suboffsets[8]; } __Pyx_memviewslice; #define __Pyx_MemoryView_Len(m) (m.shape[0]) /* Atomics.proto */ #include <pythread.h> #ifndef CYTHON_ATOMICS #define CYTHON_ATOMICS 1 #endif #define __pyx_atomic_int_type int #if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\ (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) &&\ !defined(__i386__) #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1) #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1) #ifdef __PYX_DEBUG_ATOMICS #warning "Using GNU atomics" #endif #elif CYTHON_ATOMICS && defined(_MSC_VER) && 0 #include <Windows.h> #undef __pyx_atomic_int_type #define __pyx_atomic_int_type LONG #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS #pragma message ("Using MSVC atomics") #endif #elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0 #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS #warning "Using Intel atomics" #endif #else #undef CYTHON_ATOMICS #define CYTHON_ATOMICS 0 #ifdef __PYX_DEBUG_ATOMICS #warning "Not using atomics" #endif #endif typedef volatile __pyx_atomic_int_type __pyx_atomic_int; #if CYTHON_ATOMICS #define __pyx_add_acquisition_count(memview)\ __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) #define __pyx_sub_acquisition_count(memview)\ __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) #else #define __pyx_add_acquisition_count(memview)\ __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) #define __pyx_sub_acquisition_count(memview)\ __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) #endif /* ForceInitThreads.proto */ #ifndef __PYX_FORCE_INIT_THREADS #define __PYX_FORCE_INIT_THREADS 0 #endif /* BufferFormatStructs.proto */ #define IS_UNSIGNED(type) (((type) -1) > 0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; struct __Pyx_StructField_* fields; size_t size; size_t arraysize[8]; int ndim; char typegroup; char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; /*--- Type declarations ---*/ struct __pyx_array_obj; struct __pyx_MemviewEnum_obj; struct __pyx_memoryview_obj; struct __pyx_memoryviewslice_obj; /* "View.MemoryView":105 * 
* @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_array_obj { PyObject_HEAD struct __pyx_vtabstruct_array *__pyx_vtab; char *data; Py_ssize_t len; char *format; int ndim; Py_ssize_t *_shape; Py_ssize_t *_strides; Py_ssize_t itemsize; PyObject *mode; PyObject *_format; void (*callback_free_data)(void *); int free_data; int dtype_is_object; }; /* "View.MemoryView":279 * * @cname('__pyx_MemviewEnum') * cdef class Enum(object): # <<<<<<<<<<<<<< * cdef object name * def __init__(self, name): */ struct __pyx_MemviewEnum_obj { PyObject_HEAD PyObject *name; }; /* "View.MemoryView":330 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_memoryview_obj { PyObject_HEAD struct __pyx_vtabstruct_memoryview *__pyx_vtab; PyObject *obj; PyObject *_size; PyObject *_array_interface; PyThread_type_lock lock; __pyx_atomic_int acquisition_count[2]; __pyx_atomic_int *acquisition_count_aligned_p; Py_buffer view; int flags; int dtype_is_object; __Pyx_TypeInfo *typeinfo; }; /* "View.MemoryView":965 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_memoryviewslice_obj { struct __pyx_memoryview_obj __pyx_base; __Pyx_memviewslice from_slice; PyObject *from_object; PyObject *(*to_object_func)(char *); int (*to_dtype_func)(char *, PyObject *); }; /* "View.MemoryView":105 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_vtabstruct_array { PyObject *(*get_memview)(struct __pyx_array_obj *); }; static struct __pyx_vtabstruct_array *__pyx_vtabptr_array; /* "View.MemoryView":330 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_vtabstruct_memoryview { char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *); PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *); }; static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview; /* "View.MemoryView":965 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_vtabstruct__memoryviewslice { struct __pyx_vtabstruct_memoryview __pyx_base; }; static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define 
__Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* PyObjectGetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* RaiseArgTupleInvalid.proto */ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /* RaiseDoubleKeywords.proto */ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /* ParseKeywords.proto */ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ const char* function_name); /* MemviewSliceInit.proto */ #define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d #define __Pyx_MEMVIEW_DIRECT 1 #define __Pyx_MEMVIEW_PTR 2 #define __Pyx_MEMVIEW_FULL 4 #define __Pyx_MEMVIEW_CONTIG 8 #define __Pyx_MEMVIEW_STRIDED 16 #define __Pyx_MEMVIEW_FOLLOW 32 #define __Pyx_IS_C_CONTIG 1 #define __Pyx_IS_F_CONTIG 2 static int __Pyx_init_memviewslice( struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference); static CYTHON_INLINE int __pyx_add_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); static CYTHON_INLINE int __pyx_sub_acquisition_count_locked( __pyx_atomic_int *acquisition_count, 
PyThread_type_lock lock); #define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p) #define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview)) #define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__) #define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__) static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int); static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int); /* ArgTypeTest.proto */ #define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\ __Pyx__ArgTypeTest(obj, type, name, exact)) static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* PyThreadStateGet.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; #define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #define __Pyx_PyErr_Occurred() PyErr_Occurred() #endif /* PyErrFetchRestore.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) #else #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #endif #else #define __Pyx_PyErr_Clear() PyErr_Clear() #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* PyCFunctionFastCall.proto */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); #else #define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) #endif /* PyFunctionFastCall.proto */ #if CYTHON_FAST_PYCALL #define __Pyx_PyFunction_FastCall(func, args, nargs)\ __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) #if 1 || 
PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); #else #define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) #endif #define __Pyx_BUILD_ASSERT_EXPR(cond)\ (sizeof(char [1 - 2*!(cond)]) - 1) #ifndef Py_MEMBER_SIZE #define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) #endif static size_t __pyx_pyframe_localsplus_offset = 0; #include "frameobject.h" #define __Pxy_PyFrame_Initialize_Offsets()\ ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) #define __Pyx_PyFrame_GetLocalsplus(frame)\ (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) #endif /* PyObjectCall2Args.proto */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); /* PyObjectCallMethO.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); #endif /* PyObjectCallOneArg.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); /* IncludeStringH.proto */ #include <string.h> /* BytesEquals.proto */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); /* UnicodeEquals.proto */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); /* StrEquals.proto */ #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals #else #define __Pyx_PyString_Equals __Pyx_PyBytes_Equals #endif /* UnaryNegOverflows.proto */ #define UNARY_NEG_WOULD_OVERFLOW(x)\ (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/ /* GetAttr.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); /* GetItemInt.proto */ #define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ (is_list ? 
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ __Pyx_GetItemInt_Generic(o, to_py_func(i)))) #define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); #define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, int wraparound, int boundscheck); /* ObjectGetItem.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key); #else #define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) #endif /* decode_c_string_utf16.proto */ static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 0; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = -1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } /* decode_c_string.proto */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); /* PyErrExceptionMatches.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); #else #define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) #endif /* GetAttr3.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); /* PyDictVersioning.proto */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS #define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) #define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ (version_var) = __PYX_GET_DICT_VERSION(dict);\ (cache_var) = (value); #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ (VAR) = __pyx_dict_cached_value;\ } else {\ (VAR) = __pyx_dict_cached_value = (LOOKUP);\ __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ }\ } static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); static CYTHON_INLINE PY_UINT64_T 
__Pyx_get_object_dict_version(PyObject *obj); static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); #else #define __PYX_GET_DICT_VERSION(dict) (0) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); #endif /* GetModuleGlobalName.proto */ #if CYTHON_USE_DICT_VERSIONS #define __Pyx_GetModuleGlobalName(var, name) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } #define __Pyx_GetModuleGlobalNameUncached(var, name) {\ PY_UINT64_T __pyx_dict_version;\ PyObject *__pyx_dict_cached_value;\ (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); #else #define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) #define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); #endif /* RaiseTooManyValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); /* RaiseNeedMoreValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); /* RaiseNoneIterError.proto */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); /* ExtTypeTest.proto */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /* GetTopmostException.proto */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); #endif /* SaveResetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); #else #define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) #define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) #endif /* GetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); #endif /* SwapException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); #endif /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* FastTypeChecks.proto 
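   (Editorial note, hedged: on CPython these FastTypeChecks helpers replace
   PyObject_TypeCheck and PyErr_GivenExceptionMatches with direct subtype
   checks over the type's bases, while the #else branch keeps the portable
   public API; the actual walk lives in the corresponding utility-code
   implementation, which is not part of this excerpt.)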
*/ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); #else #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) #define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) #endif #define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ /* ListCompAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len)) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) #endif /* PyIntBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); #else #define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\ (inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) #endif /* ListExtend.proto */ static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) { #if CYTHON_COMPILING_IN_CPYTHON PyObject* none = _PyList_Extend((PyListObject*)L, v); if (unlikely(!none)) return -1; Py_DECREF(none); return 0; #else return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v); #endif } /* ListAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_PyList_Append(L,x) PyList_Append(L,x) #endif /* None.proto */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname); /* ImportFrom.proto */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); /* HasAttr.proto */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *); /* PyObject_GenericGetAttrNoDict.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr #endif /* PyObject_GenericGetAttr.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr #endif /* SetVTable.proto */ static int __Pyx_SetVtable(PyObject *dict, void *vtable); /* PyObjectGetAttrStrNoError.proto */ static CYTHON_INLINE PyObject* 
__Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name); /* SetupReduce.proto */ static int __Pyx_setup_reduce(PyObject* type_obj); /* CLineInTraceback.proto */ #ifdef CYTHON_CLINE_IN_TRACEBACK #define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0) #else static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); #endif /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif /* BufferStructDeclare.proto */ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; /* MemviewSliceIsContig.proto */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim); /* OverlappingSlices.proto */ static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize); /* Capsule.proto */ static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig); /* IsLittleEndian.proto */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); /* BufferFormatCheck.proto */ static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type); /* TypeInfoCompare.proto */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b); /* MemviewSliceValidateAndInit.proto */ static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *, int writable_flag); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_double(PyObject *, int writable_flag); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_int(PyObject *, int writable_flag); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_int(PyObject *, int writable_flag); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *, int writable_flag); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* MemviewSliceCopyTemplate.proto */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice 
*from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object); /* CIntFromPy.proto */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* CIntFromPy.proto */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *); /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ /* Module declarations from 'glove.metrics.accuracy_cython' */ static PyTypeObject *__pyx_array_type = 0; static PyTypeObject *__pyx_MemviewEnum_type = 0; static PyTypeObject *__pyx_memoryview_type = 0; static PyTypeObject *__pyx_memoryviewslice_type = 0; static PyObject *generic = 0; static PyObject *strided = 0; static PyObject *indirect = 0; static PyObject *contiguous = 0; static PyObject *indirect_contiguous = 0; static int __pyx_memoryview_thread_locks_used; static PyThread_type_lock __pyx_memoryview_thread_locks[8]; static double __pyx_f_5glove_7metrics_15accuracy_cython_dot(__Pyx_memviewslice, __Pyx_memviewslice, int); /*proto*/ static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/ static void *__pyx_align_pointer(void *, size_t); /*proto*/ static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/ static PyObject *_unellipsify(PyObject *, int); /*proto*/ static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/ static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/ static int 
__pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/ static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/ static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/ static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/ static int __pyx_memoryview_err(PyObject *, char *); /*proto*/ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/ static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/ static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_int = { "int", NULL, sizeof(int), { 0 }, 0, IS_UNSIGNED(int) ? 
'U' : 'I', IS_UNSIGNED(int), 0 }; #define __Pyx_MODULE_NAME "glove.metrics.accuracy_cython" extern int __pyx_module_is_main_glove__metrics__accuracy_cython; int __pyx_module_is_main_glove__metrics__accuracy_cython = 0; /* Implementation of 'glove.metrics.accuracy_cython' */ static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_MemoryError; static PyObject *__pyx_builtin_enumerate; static PyObject *__pyx_builtin_TypeError; static PyObject *__pyx_builtin_Ellipsis; static PyObject *__pyx_builtin_id; static PyObject *__pyx_builtin_IndexError; static const char __pyx_k_O[] = "O"; static const char __pyx_k_c[] = "c"; static const char __pyx_k_i[] = "i"; static const char __pyx_k_j[] = "j"; static const char __pyx_k_k[] = "k"; static const char __pyx_k_id[] = "id"; static const char __pyx_k_new[] = "__new__"; static const char __pyx_k_obj[] = "obj"; static const char __pyx_k_base[] = "base"; static const char __pyx_k_dict[] = "__dict__"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_mode[] = "mode"; static const char __pyx_k_name[] = "name"; static const char __pyx_k_ndim[] = "ndim"; static const char __pyx_k_pack[] = "pack"; static const char __pyx_k_size[] = "size"; static const char __pyx_k_step[] = "step"; static const char __pyx_k_stop[] = "stop"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_ASCII[] = "ASCII"; static const char __pyx_k_class[] = "__class__"; static const char __pyx_k_error[] = "error"; static const char __pyx_k_flags[] = "flags"; static const char __pyx_k_input[] = "input"; static const char __pyx_k_range[] = "range"; static const char __pyx_k_score[] = "score"; static const char __pyx_k_shape[] = "shape"; static const char __pyx_k_start[] = "start"; static const char __pyx_k_encode[] = "encode"; static const char __pyx_k_format[] = "format"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_inputs[] = "inputs"; static const char __pyx_k_name_2[] = "__name__"; static const char __pyx_k_pickle[] = "pickle"; static const char __pyx_k_reduce[] = "__reduce__"; static const char __pyx_k_struct[] = "struct"; static const char __pyx_k_unpack[] = "unpack"; static const char __pyx_k_update[] = "update"; static const char __pyx_k_fortran[] = "fortran"; static const char __pyx_k_memview[] = "memview"; static const char __pyx_k_wordvec[] = "wordvec"; static const char __pyx_k_Ellipsis[] = "Ellipsis"; static const char __pyx_k_expected[] = "expected"; static const char __pyx_k_getstate[] = "__getstate__"; static const char __pyx_k_itemsize[] = "itemsize"; static const char __pyx_k_pyx_type[] = "__pyx_type"; static const char __pyx_k_setstate[] = "__setstate__"; static const char __pyx_k_TypeError[] = "TypeError"; static const char __pyx_k_enumerate[] = "enumerate"; static const char __pyx_k_pyx_state[] = "__pyx_state"; static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; static const char __pyx_k_skip_word[] = "skip_word"; static const char __pyx_k_IndexError[] = "IndexError"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_no_threads[] = "no_threads"; static const char __pyx_k_no_wordvec[] = "no_wordvec"; static const char __pyx_k_pyx_result[] = "__pyx_result"; static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; static const char __pyx_k_violations[] = "violations"; static const char __pyx_k_MemoryError[] = "MemoryError"; static const char __pyx_k_PickleError[] = "PickleError"; static const char 
__pyx_k_pyx_checksum[] = "__pyx_checksum"; static const char __pyx_k_stringsource[] = "stringsource"; static const char __pyx_k_wordvec_norm[] = "wordvec_norm"; static const char __pyx_k_no_components[] = "no_components"; static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; static const char __pyx_k_reduce_cython[] = "__reduce_cython__"; static const char __pyx_k_View_MemoryView[] = "View.MemoryView"; static const char __pyx_k_allocate_buffer[] = "allocate_buffer"; static const char __pyx_k_dtype_is_object[] = "dtype_is_object"; static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError"; static const char __pyx_k_rank_violations[] = "rank_violations"; static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; static const char __pyx_k_no_input_vectors[] = "no_input_vectors"; static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum"; static const char __pyx_k_score_of_expected[] = "score_of_expected"; static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; static const char __pyx_k_strided_and_direct[] = "<strided and direct>"; static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>"; static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>"; static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>"; static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>"; static const char __pyx_k_compute_rank_violations[] = "compute_rank_violations"; static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>"; static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'"; static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d."; static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array"; static const char __pyx_k_glove_metrics_accuracy_cython[] = "glove.metrics.accuracy_cython"; static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data."; static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>"; static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides"; static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory."; static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview"; static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview"; static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array"; static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))"; static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported"; static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s"; static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)"; static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object"; static const char __pyx_k_glove_metrics_accuracy_cython_py[] = "glove/metrics/accuracy_cython.pyx"; static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)"; static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial 
__cinit__"; static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides."; static PyObject *__pyx_n_s_ASCII; static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri; static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is; static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor; static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi; static PyObject *__pyx_kp_s_Cannot_index_with_type_s; static PyObject *__pyx_n_s_Ellipsis; static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr; static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0; static PyObject *__pyx_n_s_IndexError; static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte; static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr; static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d; static PyObject *__pyx_n_s_MemoryError; static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x; static PyObject *__pyx_kp_s_MemoryView_of_r_object; static PyObject *__pyx_n_b_O; static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a; static PyObject *__pyx_n_s_PickleError; static PyObject *__pyx_n_s_TypeError; static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_View_MemoryView; static PyObject *__pyx_n_s_allocate_buffer; static PyObject *__pyx_n_s_base; static PyObject *__pyx_n_s_c; static PyObject *__pyx_n_u_c; static PyObject *__pyx_n_s_class; static PyObject *__pyx_n_s_cline_in_traceback; static PyObject *__pyx_n_s_compute_rank_violations; static PyObject *__pyx_kp_s_contiguous_and_direct; static PyObject *__pyx_kp_s_contiguous_and_indirect; static PyObject *__pyx_n_s_dict; static PyObject *__pyx_n_s_dtype_is_object; static PyObject *__pyx_n_s_encode; static PyObject *__pyx_n_s_enumerate; static PyObject *__pyx_n_s_error; static PyObject *__pyx_n_s_expected; static PyObject *__pyx_n_s_flags; static PyObject *__pyx_n_s_format; static PyObject *__pyx_n_s_fortran; static PyObject *__pyx_n_u_fortran; static PyObject *__pyx_n_s_getstate; static PyObject *__pyx_n_s_glove_metrics_accuracy_cython; static PyObject *__pyx_kp_s_glove_metrics_accuracy_cython_py; static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi; static PyObject *__pyx_n_s_i; static PyObject *__pyx_n_s_id; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_input; static PyObject *__pyx_n_s_inputs; static PyObject *__pyx_n_s_itemsize; static PyObject *__pyx_kp_s_itemsize_0_for_cython_array; static PyObject *__pyx_n_s_j; static PyObject *__pyx_n_s_k; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_memview; static PyObject *__pyx_n_s_mode; static PyObject *__pyx_n_s_name; static PyObject *__pyx_n_s_name_2; static PyObject *__pyx_n_s_ndim; static PyObject *__pyx_n_s_new; static PyObject *__pyx_n_s_no_components; static PyObject *__pyx_kp_s_no_default___reduce___due_to_non; static PyObject *__pyx_n_s_no_input_vectors; static PyObject *__pyx_n_s_no_threads; static PyObject *__pyx_n_s_no_wordvec; static PyObject *__pyx_n_s_obj; static PyObject *__pyx_n_s_pack; static PyObject *__pyx_n_s_pickle; static PyObject *__pyx_n_s_pyx_PickleError; static PyObject *__pyx_n_s_pyx_checksum; static PyObject *__pyx_n_s_pyx_getbuffer; static PyObject *__pyx_n_s_pyx_result; static PyObject *__pyx_n_s_pyx_state; static PyObject *__pyx_n_s_pyx_type; static PyObject *__pyx_n_s_pyx_unpickle_Enum; static PyObject *__pyx_n_s_pyx_vtable; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_rank_violations; static PyObject 
*__pyx_n_s_reduce; static PyObject *__pyx_n_s_reduce_cython; static PyObject *__pyx_n_s_reduce_ex; static PyObject *__pyx_n_s_score; static PyObject *__pyx_n_s_score_of_expected; static PyObject *__pyx_n_s_setstate; static PyObject *__pyx_n_s_setstate_cython; static PyObject *__pyx_n_s_shape; static PyObject *__pyx_n_s_size; static PyObject *__pyx_n_s_skip_word; static PyObject *__pyx_n_s_start; static PyObject *__pyx_n_s_step; static PyObject *__pyx_n_s_stop; static PyObject *__pyx_kp_s_strided_and_direct; static PyObject *__pyx_kp_s_strided_and_direct_or_indirect; static PyObject *__pyx_kp_s_strided_and_indirect; static PyObject *__pyx_kp_s_stringsource; static PyObject *__pyx_n_s_struct; static PyObject *__pyx_n_s_test; static PyObject *__pyx_kp_s_unable_to_allocate_array_data; static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; static PyObject *__pyx_n_s_unpack; static PyObject *__pyx_n_s_update; static PyObject *__pyx_n_s_violations; static PyObject *__pyx_n_s_wordvec; static PyObject *__pyx_n_s_wordvec_norm; static PyObject *__pyx_pf_5glove_7metrics_15accuracy_cython_compute_rank_violations(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_wordvec, __Pyx_memviewslice __pyx_v_wordvec_norm, __Pyx_memviewslice __pyx_v_input, __Pyx_memviewslice __pyx_v_expected, __Pyx_memviewslice __pyx_v_inputs, __Pyx_memviewslice __pyx_v_rank_violations, CYTHON_UNUSED int __pyx_v_no_threads); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject 
*__pyx_v___pyx_state); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static void 
__pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_int_0; static PyObject *__pyx_int_1; static PyObject *__pyx_int_184977713; static PyObject *__pyx_int_neg_1; static PyObject *__pyx_tuple_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__9; static PyObject *__pyx_slice__15; static PyObject *__pyx_tuple__10; static PyObject *__pyx_tuple__11; static PyObject *__pyx_tuple__12; static PyObject *__pyx_tuple__13; static PyObject *__pyx_tuple__14; static PyObject *__pyx_tuple__16; static PyObject *__pyx_tuple__17; static PyObject *__pyx_tuple__18; static PyObject *__pyx_tuple__19; static PyObject *__pyx_tuple__21; static PyObject *__pyx_tuple__22; static PyObject *__pyx_tuple__23; static PyObject *__pyx_tuple__24; static PyObject *__pyx_tuple__25; static PyObject *__pyx_tuple__26; static PyObject *__pyx_codeobj__20; static PyObject *__pyx_codeobj__27; /* Late includes */ /* "glove/metrics/accuracy_cython.pyx":7 * * * cdef double dot(double[::1] x, # <<<<<<<<<<<<<< * double[::1] y, * int dim) nogil: */ static double __pyx_f_5glove_7metrics_15accuracy_cython_dot(__Pyx_memviewslice __pyx_v_x, __Pyx_memviewslice __pyx_v_y, int __pyx_v_dim) { int __pyx_v_i; double __pyx_v_result; double __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; /* "glove/metrics/accuracy_cython.pyx":12 * * cdef int i * cdef double result = 0.0 # <<<<<<<<<<<<<< * * for i in range(dim): */ __pyx_v_result = 0.0; /* "glove/metrics/accuracy_cython.pyx":14 * cdef double result = 0.0 * * for i in range(dim): # <<<<<<<<<<<<<< * result += x[i] * y[i] * */ __pyx_t_1 = __pyx_v_dim; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "glove/metrics/accuracy_cython.pyx":15 * * for i in range(dim): * result += x[i] * y[i] # <<<<<<<<<<<<<< * * return result */ __pyx_t_4 = __pyx_v_i; __pyx_t_5 = __pyx_v_i; __pyx_v_result = (__pyx_v_result + ((*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_x.data) + __pyx_t_4)) ))) * (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_y.data) + __pyx_t_5)) ))))); } /* "glove/metrics/accuracy_cython.pyx":17 * result += x[i] * y[i] * 
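 * (Editorial note: the generated loop above computes the plain inner product
 * result = sum over i < dim of x[i]*y[i] for two C-contiguous double[::1]
 * memoryviews, and the pyx signature marks it nogil, which is what lets the
 * prange loop further down call it in parallel. A minimal plain-C sketch of
 * the same computation, under the hypothetical name dot_ref, kept inside
 * this comment so the translation unit is unchanged:
 *
 *   static double dot_ref(const double *x, const double *y, int dim) {
 *       double r = 0.0;
 *       for (int i = 0; i < dim; i++)
 *           r += x[i] * y[i];   // accumulate elementwise products
 *       return r;
 *   }
 *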
* return result # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_result; goto __pyx_L0; /* "glove/metrics/accuracy_cython.pyx":7 * * * cdef double dot(double[::1] x, # <<<<<<<<<<<<<< * double[::1] y, * int dim) nogil: */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "glove/metrics/accuracy_cython.pyx":20 * * * def compute_rank_violations(double[:, ::1] wordvec, # <<<<<<<<<<<<<< * double[::1] wordvec_norm, * double[:, ::1] input, */ /* Python wrapper */ static PyObject *__pyx_pw_5glove_7metrics_15accuracy_cython_1compute_rank_violations(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_5glove_7metrics_15accuracy_cython_compute_rank_violations[] = "\n Compute the rank violations\n of the expected words in the word analogy task.\n "; static PyMethodDef __pyx_mdef_5glove_7metrics_15accuracy_cython_1compute_rank_violations = {"compute_rank_violations", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5glove_7metrics_15accuracy_cython_1compute_rank_violations, METH_VARARGS|METH_KEYWORDS, __pyx_doc_5glove_7metrics_15accuracy_cython_compute_rank_violations}; static PyObject *__pyx_pw_5glove_7metrics_15accuracy_cython_1compute_rank_violations(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_wordvec = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_wordvec_norm = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_input = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_expected = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_inputs = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_rank_violations = { 0, 0, { 0 }, { 0 }, { 0 } }; CYTHON_UNUSED int __pyx_v_no_threads; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("compute_rank_violations (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_wordvec,&__pyx_n_s_wordvec_norm,&__pyx_n_s_input,&__pyx_n_s_expected,&__pyx_n_s_inputs,&__pyx_n_s_rank_violations,&__pyx_n_s_no_threads,0}; PyObject* values[7] = {0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); CYTHON_FALLTHROUGH; case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); CYTHON_FALLTHROUGH; case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_wordvec)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_wordvec_norm)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("compute_rank_violations", 1, 7, 7, 1); __PYX_ERR(0, 20, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_input)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("compute_rank_violations", 1, 7, 7, 2); __PYX_ERR(0, 20, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: 
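/* (Editorial note: this switch is Cython's standard argument-unpacking
   pattern -- positional arguments are copied into values[] through the
   deliberate CYTHON_FALLTHROUGH cascade above, then each still-missing name
   is fetched from the keyword dict in declaration order; since
   __Pyx_RaiseArgtupleInvalid is invoked with exact=1 and min=max=7, all
   seven parameters of compute_rank_violations are required.) */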
if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_expected)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("compute_rank_violations", 1, 7, 7, 3); __PYX_ERR(0, 20, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 4: if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_inputs)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("compute_rank_violations", 1, 7, 7, 4); __PYX_ERR(0, 20, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 5: if (likely((values[5] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_rank_violations)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("compute_rank_violations", 1, 7, 7, 5); __PYX_ERR(0, 20, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 6: if (likely((values[6] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_no_threads)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("compute_rank_violations", 1, 7, 7, 6); __PYX_ERR(0, 20, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "compute_rank_violations") < 0)) __PYX_ERR(0, 20, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 7) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); } __pyx_v_wordvec = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_wordvec.memview)) __PYX_ERR(0, 20, __pyx_L3_error) __pyx_v_wordvec_norm = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_wordvec_norm.memview)) __PYX_ERR(0, 21, __pyx_L3_error) __pyx_v_input = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_input.memview)) __PYX_ERR(0, 22, __pyx_L3_error) __pyx_v_expected = __Pyx_PyObject_to_MemoryviewSlice_ds_int(values[3], PyBUF_WRITABLE); if (unlikely(!__pyx_v_expected.memview)) __PYX_ERR(0, 23, __pyx_L3_error) __pyx_v_inputs = __Pyx_PyObject_to_MemoryviewSlice_d_dc_int(values[4], PyBUF_WRITABLE); if (unlikely(!__pyx_v_inputs.memview)) __PYX_ERR(0, 24, __pyx_L3_error) __pyx_v_rank_violations = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[5], PyBUF_WRITABLE); if (unlikely(!__pyx_v_rank_violations.memview)) __PYX_ERR(0, 25, __pyx_L3_error) __pyx_v_no_threads = __Pyx_PyInt_As_int(values[6]); if (unlikely((__pyx_v_no_threads == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 26, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("compute_rank_violations", 1, 7, 7, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 20, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("glove.metrics.accuracy_cython.compute_rank_violations", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5glove_7metrics_15accuracy_cython_compute_rank_violations(__pyx_self, __pyx_v_wordvec, __pyx_v_wordvec_norm, __pyx_v_input, __pyx_v_expected, __pyx_v_inputs, __pyx_v_rank_violations, __pyx_v_no_threads); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5glove_7metrics_15accuracy_cython_compute_rank_violations(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_wordvec, __Pyx_memviewslice 
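/* (Editorial note, hedged: each __Pyx_memviewslice argument is a by-value
   struct bundling the raw data pointer, a pointer to the owning memoryview
   object, and per-dimension shape/strides/suboffsets arrays -- visible in
   the { 0, 0, { 0 }, { 0 }, { 0 } } zero-initializers used above -- which is
   what lets the nogil loop below index the buffers without touching any
   Python objects.) */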
__pyx_v_wordvec_norm, __Pyx_memviewslice __pyx_v_input, __Pyx_memviewslice __pyx_v_expected, __Pyx_memviewslice __pyx_v_inputs, __Pyx_memviewslice __pyx_v_rank_violations, CYTHON_UNUSED int __pyx_v_no_threads) { int __pyx_v_i; int __pyx_v_j; int __pyx_v_k; CYTHON_UNUSED int __pyx_v_no_input_vectors; int __pyx_v_no_wordvec; int __pyx_v_skip_word; int __pyx_v_no_components; int __pyx_v_violations; double __pyx_v_score_of_expected; double __pyx_v_score; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; __Pyx_memviewslice __pyx_t_4 = { 0, 0, { 0 }, { 0 }, { 0 } }; Py_ssize_t __pyx_t_5; __Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } }; Py_ssize_t __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_t_10; int __pyx_t_11; int __pyx_t_12; __Pyx_RefNannySetupContext("compute_rank_violations", 0); /* "glove/metrics/accuracy_cython.pyx":37 * cdef double score_of_expected, score * * no_input_vectors = input.shape[0] # <<<<<<<<<<<<<< * no_wordvec = wordvec.shape[0] * no_components = wordvec.shape[1] */ __pyx_v_no_input_vectors = (__pyx_v_input.shape[0]); /* "glove/metrics/accuracy_cython.pyx":38 * * no_input_vectors = input.shape[0] * no_wordvec = wordvec.shape[0] # <<<<<<<<<<<<<< * no_components = wordvec.shape[1] * */ __pyx_v_no_wordvec = (__pyx_v_wordvec.shape[0]); /* "glove/metrics/accuracy_cython.pyx":39 * no_input_vectors = input.shape[0] * no_wordvec = wordvec.shape[0] * no_components = wordvec.shape[1] # <<<<<<<<<<<<<< * * with nogil: */ __pyx_v_no_components = (__pyx_v_wordvec.shape[1]); /* "glove/metrics/accuracy_cython.pyx":41 * no_components = wordvec.shape[1] * * with nogil: # <<<<<<<<<<<<<< * for i in prange(no_input_vectors, num_threads=no_threads, * schedule='dynamic'): */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { /* "glove/metrics/accuracy_cython.pyx":42 * * with nogil: * for i in prange(no_input_vectors, num_threads=no_threads, # <<<<<<<<<<<<<< * schedule='dynamic'): * */ __pyx_t_1 = __pyx_v_no_input_vectors; if ((1 == 0)) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_3 = (__pyx_t_1 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_3 > 0) { #ifdef _OPENMP #pragma omp parallel num_threads(__pyx_v_no_threads) private(__pyx_t_10, __pyx_t_11, __pyx_t_12, __pyx_t_5, __pyx_t_7, __pyx_t_8, __pyx_t_9) firstprivate(__pyx_t_4, __pyx_t_6) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j) lastprivate(__pyx_v_k) lastprivate(__pyx_v_score) lastprivate(__pyx_v_score_of_expected) lastprivate(__pyx_v_skip_word) lastprivate(__pyx_v_violations) schedule(dynamic) #endif /* _OPENMP */ for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_3; __pyx_t_2++){ { __pyx_v_i = (int)(0 + 1 * __pyx_t_2); /* Initialize private variables to invalid values */ __pyx_v_j = ((int)0xbad0bad0); __pyx_v_k = ((int)0xbad0bad0); __pyx_v_score = ((double)__PYX_NAN()); __pyx_v_score_of_expected = ((double)__PYX_NAN()); __pyx_v_skip_word = ((int)0xbad0bad0); __pyx_v_violations = ((int)0xbad0bad0); /* "glove/metrics/accuracy_cython.pyx":46 * * # Compute the score of the expected word. 
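 * (Editorial note: per the pyx source quoted here, the score is
 * dot(input[i], wordvec[expected[i]]) / wordvec_norm[expected[i]], i.e.
 * cosine similarity up to the factor 1/||input[i]||, assuming wordvec_norm
 * holds precomputed L2 norms; that factor is identical for every candidate
 * compared against row i, so it cannot change the ranking or the violation
 * count.)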
* score_of_expected = (dot(input[i], # <<<<<<<<<<<<<< * wordvec[expected[i]], * no_components) */ __pyx_t_4.data = __pyx_v_input.data; __pyx_t_4.memview = __pyx_v_input.memview; __PYX_INC_MEMVIEW(&__pyx_t_4, 0); { Py_ssize_t __pyx_tmp_idx = __pyx_v_i; Py_ssize_t __pyx_tmp_stride = __pyx_v_input.strides[0]; __pyx_t_4.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_4.shape[0] = __pyx_v_input.shape[1]; __pyx_t_4.strides[0] = __pyx_v_input.strides[1]; __pyx_t_4.suboffsets[0] = -1; __pyx_t_5 = __pyx_v_i; /* "glove/metrics/accuracy_cython.pyx":47 * # Compute the score of the expected word. * score_of_expected = (dot(input[i], * wordvec[expected[i]], # <<<<<<<<<<<<<< * no_components) * / wordvec_norm[expected[i]]) */ __pyx_t_6.data = __pyx_v_wordvec.data; __pyx_t_6.memview = __pyx_v_wordvec.memview; __PYX_INC_MEMVIEW(&__pyx_t_6, 0); { Py_ssize_t __pyx_tmp_idx = (*((int *) ( /* dim=0 */ (__pyx_v_expected.data + __pyx_t_5 * __pyx_v_expected.strides[0]) ))); Py_ssize_t __pyx_tmp_stride = __pyx_v_wordvec.strides[0]; __pyx_t_6.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_6.shape[0] = __pyx_v_wordvec.shape[1]; __pyx_t_6.strides[0] = __pyx_v_wordvec.strides[1]; __pyx_t_6.suboffsets[0] = -1; __pyx_t_5 = __pyx_v_i; /* "glove/metrics/accuracy_cython.pyx":49 * wordvec[expected[i]], * no_components) * / wordvec_norm[expected[i]]) # <<<<<<<<<<<<<< * * # Compute all other scores and count */ __pyx_t_7 = (*((int *) ( /* dim=0 */ (__pyx_v_expected.data + __pyx_t_5 * __pyx_v_expected.strides[0]) ))); __pyx_v_score_of_expected = (__pyx_f_5glove_7metrics_15accuracy_cython_dot(__pyx_t_4, __pyx_t_6, __pyx_v_no_components) / (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_wordvec_norm.data) + __pyx_t_7)) )))); __PYX_XDEC_MEMVIEW(&__pyx_t_4, 0); __pyx_t_4.memview = NULL; __pyx_t_4.data = NULL; __PYX_XDEC_MEMVIEW(&__pyx_t_6, 0); __pyx_t_6.memview = NULL; __pyx_t_6.data = NULL; /* "glove/metrics/accuracy_cython.pyx":53 * # Compute all other scores and count * # rank violations. * violations = 0 # <<<<<<<<<<<<<< * * for j in range(no_wordvec): */ __pyx_v_violations = 0; /* "glove/metrics/accuracy_cython.pyx":55 * violations = 0 * * for j in range(no_wordvec): # <<<<<<<<<<<<<< * * # Words from the input do not */ __pyx_t_8 = __pyx_v_no_wordvec; __pyx_t_9 = __pyx_t_8; for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) { __pyx_v_j = __pyx_t_10; /* "glove/metrics/accuracy_cython.pyx":59 * # Words from the input do not * # count as violations. * skip_word = 0 # <<<<<<<<<<<<<< * for k in range(4): * if inputs[i, k] == j: */ __pyx_v_skip_word = 0; /* "glove/metrics/accuracy_cython.pyx":60 * # count as violations. 
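 * (Editorial note: inputs[i, 0..3] evidently holds the word ids making up
 * analogy question i, so the k-loop below excludes the question's own words
 * from the violation count.)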
* skip_word = 0 * for k in range(4): # <<<<<<<<<<<<<< * if inputs[i, k] == j: * skip_word = 1 */ for (__pyx_t_11 = 0; __pyx_t_11 < 4; __pyx_t_11+=1) { __pyx_v_k = __pyx_t_11; /* "glove/metrics/accuracy_cython.pyx":61 * skip_word = 0 * for k in range(4): * if inputs[i, k] == j: # <<<<<<<<<<<<<< * skip_word = 1 * break */ __pyx_t_5 = __pyx_v_i; __pyx_t_7 = __pyx_v_k; __pyx_t_12 = (((*((int *) ( /* dim=1 */ ((char *) (((int *) ( /* dim=0 */ (__pyx_v_inputs.data + __pyx_t_5 * __pyx_v_inputs.strides[0]) )) + __pyx_t_7)) ))) == __pyx_v_j) != 0); if (__pyx_t_12) { /* "glove/metrics/accuracy_cython.pyx":62 * for k in range(4): * if inputs[i, k] == j: * skip_word = 1 # <<<<<<<<<<<<<< * break * */ __pyx_v_skip_word = 1; /* "glove/metrics/accuracy_cython.pyx":63 * if inputs[i, k] == j: * skip_word = 1 * break # <<<<<<<<<<<<<< * * if skip_word == 1: */ goto __pyx_L13_break; /* "glove/metrics/accuracy_cython.pyx":61 * skip_word = 0 * for k in range(4): * if inputs[i, k] == j: # <<<<<<<<<<<<<< * skip_word = 1 * break */ } } __pyx_L13_break:; /* "glove/metrics/accuracy_cython.pyx":65 * break * * if skip_word == 1: # <<<<<<<<<<<<<< * continue * */ __pyx_t_12 = ((__pyx_v_skip_word == 1) != 0); if (__pyx_t_12) { /* "glove/metrics/accuracy_cython.pyx":66 * * if skip_word == 1: * continue # <<<<<<<<<<<<<< * * score = (dot(input[i], */ goto __pyx_L10_continue; /* "glove/metrics/accuracy_cython.pyx":65 * break * * if skip_word == 1: # <<<<<<<<<<<<<< * continue * */ } /* "glove/metrics/accuracy_cython.pyx":68 * continue * * score = (dot(input[i], # <<<<<<<<<<<<<< * wordvec[j], * no_components) */ __pyx_t_6.data = __pyx_v_input.data; __pyx_t_6.memview = __pyx_v_input.memview; __PYX_INC_MEMVIEW(&__pyx_t_6, 0); { Py_ssize_t __pyx_tmp_idx = __pyx_v_i; Py_ssize_t __pyx_tmp_stride = __pyx_v_input.strides[0]; __pyx_t_6.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_6.shape[0] = __pyx_v_input.shape[1]; __pyx_t_6.strides[0] = __pyx_v_input.strides[1]; __pyx_t_6.suboffsets[0] = -1; __pyx_t_4.data = __pyx_v_wordvec.data; /* "glove/metrics/accuracy_cython.pyx":69 * * score = (dot(input[i], * wordvec[j], # <<<<<<<<<<<<<< * no_components) * / wordvec_norm[j]) */ __pyx_t_4.memview = __pyx_v_wordvec.memview; __PYX_INC_MEMVIEW(&__pyx_t_4, 0); { Py_ssize_t __pyx_tmp_idx = __pyx_v_j; Py_ssize_t __pyx_tmp_stride = __pyx_v_wordvec.strides[0]; __pyx_t_4.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_4.shape[0] = __pyx_v_wordvec.shape[1]; __pyx_t_4.strides[0] = __pyx_v_wordvec.strides[1]; __pyx_t_4.suboffsets[0] = -1; __pyx_t_7 = __pyx_v_j; /* "glove/metrics/accuracy_cython.pyx":71 * wordvec[j], * no_components) * / wordvec_norm[j]) # <<<<<<<<<<<<<< * * if score >= score_of_expected: */ __pyx_v_score = (__pyx_f_5glove_7metrics_15accuracy_cython_dot(__pyx_t_6, __pyx_t_4, __pyx_v_no_components) / (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_wordvec_norm.data) + __pyx_t_7)) )))); __PYX_XDEC_MEMVIEW(&__pyx_t_6, 0); __pyx_t_6.memview = NULL; __pyx_t_6.data = NULL; __PYX_XDEC_MEMVIEW(&__pyx_t_4, 0); __pyx_t_4.memview = NULL; __pyx_t_4.data = NULL; /* "glove/metrics/accuracy_cython.pyx":73 * / wordvec_norm[j]) * * if score >= score_of_expected: # <<<<<<<<<<<<<< * violations = violations + 1 * */ __pyx_t_12 = ((__pyx_v_score >= __pyx_v_score_of_expected) != 0); if (__pyx_t_12) { /* "glove/metrics/accuracy_cython.pyx":74 * * if score >= score_of_expected: * violations = violations + 1 # <<<<<<<<<<<<<< * * # Update the average rank with the rank */ __pyx_v_violations = (__pyx_v_violations + 1); /* 
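   (Editorial note: after the j-loop finishes, violations counts the
   candidates that scored at least score_of_expected and were not skipped as
   question words -- effectively the expected word's rank among eligible
   candidates -- and the per-example total is stored into rank_violations[i]
   just below.)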
"glove/metrics/accuracy_cython.pyx":73 * / wordvec_norm[j]) * * if score >= score_of_expected: # <<<<<<<<<<<<<< * violations = violations + 1 * */ } __pyx_L10_continue:; } /* "glove/metrics/accuracy_cython.pyx":78 * # Update the average rank with the rank * # of this example. * rank_violations[i] = violations # <<<<<<<<<<<<<< */ __pyx_t_7 = __pyx_v_i; *((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_rank_violations.data) + __pyx_t_7)) )) = __pyx_v_violations; } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "glove/metrics/accuracy_cython.pyx":41 * no_components = wordvec.shape[1] * * with nogil: # <<<<<<<<<<<<<< * for i in prange(no_input_vectors, num_threads=no_threads, * schedule='dynamic'): */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "glove/metrics/accuracy_cython.pyx":20 * * * def compute_rank_violations(double[:, ::1] wordvec, # <<<<<<<<<<<<<< * double[::1] wordvec_norm, * double[:, ::1] input, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); __PYX_XDEC_MEMVIEW(&__pyx_v_wordvec, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_wordvec_norm, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_input, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_expected, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_inputs, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_rank_violations, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":122 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* Python wrapper */ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_shape = 0; Py_ssize_t __pyx_v_itemsize; PyObject *__pyx_v_format = 0; PyObject *__pyx_v_mode = 0; int __pyx_v_allocate_buffer; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0}; PyObject* values[5] = {0,0,0,0,0}; values[3] = ((PyObject *)__pyx_n_s_c); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 122, 
__pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 122, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode); if (value) { values[3] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 122, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_shape = ((PyObject*)values[0]); __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 122, __pyx_L3_error) __pyx_v_format = values[2]; __pyx_v_mode = values[3]; if (values[4]) { __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 123, __pyx_L3_error) } else { /* "View.MemoryView":123 * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<< * * cdef int idx */ __pyx_v_allocate_buffer = ((int)1); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 122, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 122, __pyx_L1_error) if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 122, __pyx_L1_error) } __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer); /* "View.MemoryView":122 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) { int __pyx_v_idx; Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_dim; PyObject **__pyx_v_p; char __pyx_v_order; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; char 
*__pyx_t_7; int __pyx_t_8; Py_ssize_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; Py_ssize_t __pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__cinit__", 0); __Pyx_INCREF(__pyx_v_format); /* "View.MemoryView":129 * cdef PyObject **p * * self.ndim = <int> len(shape) # <<<<<<<<<<<<<< * self.itemsize = itemsize * */ if (unlikely(__pyx_v_shape == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(1, 129, __pyx_L1_error) } __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 129, __pyx_L1_error) __pyx_v_self->ndim = ((int)__pyx_t_1); /* "View.MemoryView":130 * * self.ndim = <int> len(shape) * self.itemsize = itemsize # <<<<<<<<<<<<<< * * if not self.ndim: */ __pyx_v_self->itemsize = __pyx_v_itemsize; /* "View.MemoryView":132 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":133 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 133, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 133, __pyx_L1_error) /* "View.MemoryView":132 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ } /* "View.MemoryView":135 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ __pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":136 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 136, __pyx_L1_error) /* "View.MemoryView":135 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ } /* "View.MemoryView":138 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ __pyx_t_2 = PyBytes_Check(__pyx_v_format); __pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":139 * * if not isinstance(format, bytes): * format = format.encode('ASCII') # <<<<<<<<<<<<<< * self._format = format # keep a reference to the byte string * self.format = self._format */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_3 = (__pyx_t_6) ? 
__Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_n_s_ASCII) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_n_s_ASCII); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":138 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ } /* "View.MemoryView":140 * if not isinstance(format, bytes): * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<< * self.format = self._format * */ if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 140, __pyx_L1_error) __pyx_t_3 = __pyx_v_format; __Pyx_INCREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __Pyx_GOTREF(__pyx_v_self->_format); __Pyx_DECREF(__pyx_v_self->_format); __pyx_v_self->_format = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":141 * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string * self.format = self._format # <<<<<<<<<<<<<< * * */ if (unlikely(__pyx_v_self->_format == Py_None)) { PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found"); __PYX_ERR(1, 141, __pyx_L1_error) } __pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(1, 141, __pyx_L1_error) __pyx_v_self->format = __pyx_t_7; /* "View.MemoryView":144 * * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<< * self._strides = self._shape + self.ndim * */ __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2))); /* "View.MemoryView":145 * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<< * * if not self._shape: */ __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim); /* "View.MemoryView":147 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ __pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":148 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 148, __pyx_L1_error) /* "View.MemoryView":147 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ } /* "View.MemoryView":151 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) */ __pyx_t_8 = 0; __pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0; for (;;) { if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 151, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_9; __pyx_v_idx = __pyx_t_8; __pyx_t_8 = (__pyx_t_8 + 1); /* "View.MemoryView":152 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ __pyx_t_4 = ((__pyx_v_dim <= 0) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":153 * for idx, dim in enumerate(shape): * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<< * self._shape[idx] = dim * */ __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6); __pyx_t_5 = 0; __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 153, __pyx_L1_error) /* "View.MemoryView":152 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ } /* "View.MemoryView":154 * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim # <<<<<<<<<<<<<< * * cdef char order */ (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim; /* "View.MemoryView":151 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":157 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 157, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":158 * cdef char order * if mode == 'fortran': * order = b'F' # <<<<<<<<<<<<<< * self.mode = u'fortran' * elif mode == 'c': */ __pyx_v_order = 'F'; /* "View.MemoryView":159 * if mode == 'fortran': * order = b'F' * self.mode = u'fortran' # <<<<<<<<<<<<<< * elif mode == 'c': * order = b'C' */ __Pyx_INCREF(__pyx_n_u_fortran); __Pyx_GIVEREF(__pyx_n_u_fortran); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_fortran; /* "View.MemoryView":157 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ goto __pyx_L10; } /* "View.MemoryView":160 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 160, __pyx_L1_error) if (likely(__pyx_t_4)) { /* "View.MemoryView":161 * self.mode = u'fortran' * elif mode == 'c': * order = b'C' # <<<<<<<<<<<<<< * self.mode = u'c' * else: */ __pyx_v_order = 'C'; /* "View.MemoryView":162 * elif mode == 'c': * order = b'C' * self.mode = u'c' # <<<<<<<<<<<<<< * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) */ __Pyx_INCREF(__pyx_n_u_c); __Pyx_GIVEREF(__pyx_n_u_c); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_c; /* "View.MemoryView":160 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ goto __pyx_L10; } /* "View.MemoryView":164 * self.mode = u'c' * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<< * * self.len = fill_contig_strides_array(self._shape, self._strides, */ /*else*/ { __pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 164, __pyx_L1_error) } __pyx_L10:; /* "View.MemoryView":166 * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) * * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<< * itemsize, self.ndim, order) * */ __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order); /* "View.MemoryView":169 * itemsize, self.ndim, order) * * self.free_data = allocate_buffer # <<<<<<<<<<<<<< * self.dtype_is_object = format == b'O' * if allocate_buffer: */ __pyx_v_self->free_data = __pyx_v_allocate_buffer; /* "View.MemoryView":170 * * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<< * if allocate_buffer: * */ __pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 
170, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 170, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_v_self->dtype_is_object = __pyx_t_4; /* "View.MemoryView":171 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ __pyx_t_4 = (__pyx_v_allocate_buffer != 0); if (__pyx_t_4) { /* "View.MemoryView":174 * * * self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<< * if not self.data: * raise MemoryError("unable to allocate array data.") */ __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len)); /* "View.MemoryView":175 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ __pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":176 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 176, __pyx_L1_error) /* "View.MemoryView":175 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ } /* "View.MemoryView":178 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ __pyx_t_4 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_4) { /* "View.MemoryView":179 * * if self.dtype_is_object: * p = <PyObject **> self.data # <<<<<<<<<<<<<< * for i in range(self.len / itemsize): * p[i] = Py_None */ __pyx_v_p = ((PyObject **)__pyx_v_self->data); /* "View.MemoryView":180 * if self.dtype_is_object: * p = <PyObject **> self.data * for i in range(self.len / itemsize): # <<<<<<<<<<<<<< * p[i] = Py_None * Py_INCREF(Py_None) */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 180, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 180, __pyx_L1_error) } __pyx_t_1 = (__pyx_v_self->len / __pyx_v_itemsize); __pyx_t_9 = __pyx_t_1; for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_9; __pyx_t_11+=1) { __pyx_v_i = __pyx_t_11; /* "View.MemoryView":181 * p = <PyObject **> self.data * for i in range(self.len / itemsize): * p[i] = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ (__pyx_v_p[__pyx_v_i]) = Py_None; /* "View.MemoryView":182 * for i in range(self.len / itemsize): * p[i] = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * @cname('getbuffer') */ Py_INCREF(Py_None); } /* "View.MemoryView":178 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ } /* "View.MemoryView":171 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":122 * cdef bint 
dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_format); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":185 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_bufmode; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; char *__pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_t_6; Py_ssize_t *__pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; if (__pyx_v_info == NULL) { PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); return -1; } __Pyx_RefNannySetupContext("__getbuffer__", 0); __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); /* "View.MemoryView":186 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 # <<<<<<<<<<<<<< * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = -1; /* "View.MemoryView":187 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 187, __pyx_L1_error) __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":188 * cdef int bufmode = -1 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":187 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ goto __pyx_L3; } /* "View.MemoryView":189 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); 
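/* Editor's note (not generated code): a pure-Python sketch of the
 * mode -> request-flag mapping implemented by __getbuffer__ above.
 * The PyBUF_* values are the buffer-request constants from CPython's
 * object.h, reproduced here only for illustration:
 *
 *     PyBUF_ND             = 0x0008
 *     PyBUF_STRIDES        = 0x0010 | PyBUF_ND         # 0x0018
 *     PyBUF_C_CONTIGUOUS   = 0x0020 | PyBUF_STRIDES    # 0x0038
 *     PyBUF_F_CONTIGUOUS   = 0x0040 | PyBUF_STRIDES    # 0x0058
 *     PyBUF_ANY_CONTIGUOUS = 0x0080 | PyBUF_STRIDES    # 0x0098
 *
 *     def bufmode_for(mode):
 *         # Only contiguous exports are permitted, matching the
 *         # ValueError raised below when (flags & bufmode) is 0.
 *         if mode == u"c":
 *             return PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 *         if mode == u"fortran":
 *             return PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 *         return -1
 */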
if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 189, __pyx_L1_error) __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":190 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") */ __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":189 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ } __pyx_L3:; /* "View.MemoryView":191 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ __pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":192 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 192, __pyx_L1_error) /* "View.MemoryView":191 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ } /* "View.MemoryView":193 * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data # <<<<<<<<<<<<<< * info.len = self.len * info.ndim = self.ndim */ __pyx_t_4 = __pyx_v_self->data; __pyx_v_info->buf = __pyx_t_4; /* "View.MemoryView":194 * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data * info.len = self.len # <<<<<<<<<<<<<< * info.ndim = self.ndim * info.shape = self._shape */ __pyx_t_5 = __pyx_v_self->len; __pyx_v_info->len = __pyx_t_5; /* "View.MemoryView":195 * info.buf = self.data * info.len = self.len * info.ndim = self.ndim # <<<<<<<<<<<<<< * info.shape = self._shape * info.strides = self._strides */ __pyx_t_6 = __pyx_v_self->ndim; __pyx_v_info->ndim = __pyx_t_6; /* "View.MemoryView":196 * info.len = self.len * info.ndim = self.ndim * info.shape = self._shape # <<<<<<<<<<<<<< * info.strides = self._strides * info.suboffsets = NULL */ __pyx_t_7 = __pyx_v_self->_shape; __pyx_v_info->shape = __pyx_t_7; /* "View.MemoryView":197 * info.ndim = self.ndim * info.shape = self._shape * info.strides = self._strides # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = self.itemsize */ __pyx_t_7 = __pyx_v_self->_strides; __pyx_v_info->strides = __pyx_t_7; /* "View.MemoryView":198 * info.shape = self._shape * info.strides = self._strides * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = self.itemsize * info.readonly = 0 */ __pyx_v_info->suboffsets = NULL; /* "View.MemoryView":199 * info.strides = self._strides * info.suboffsets = NULL * info.itemsize = self.itemsize # <<<<<<<<<<<<<< * info.readonly = 0 * */ __pyx_t_5 = __pyx_v_self->itemsize; 
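/* Editor's note (not generated code): once the Py_buffer fields above
 * (buf, len, ndim, shape, strides, itemsize) are filled in, a consumer
 * sees them as ordinary memoryview attributes. A runnable stdlib-only
 * illustration:
 *
 *     data = bytearray(b"0123456789ab")
 *     m = memoryview(data).cast("B", (3, 4))  # 2-D C-contiguous view
 *     assert m.ndim == 2 and m.shape == (3, 4)
 *     assert m.strides == (4, 1) and m.itemsize == 1
 *     assert m.nbytes == 12                   # corresponds to info.len
 */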
__pyx_v_info->itemsize = __pyx_t_5; /* "View.MemoryView":200 * info.suboffsets = NULL * info.itemsize = self.itemsize * info.readonly = 0 # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ __pyx_v_info->readonly = 0; /* "View.MemoryView":202 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":203 * * if flags & PyBUF_FORMAT: * info.format = self.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_4 = __pyx_v_self->format; __pyx_v_info->format = __pyx_t_4; /* "View.MemoryView":202 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ goto __pyx_L5; } /* "View.MemoryView":205 * info.format = self.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.obj = self */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L5:; /* "View.MemoryView":207 * info.format = NULL * * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":185 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info->obj == Py_None) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":211 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* Python wrapper */ static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_array___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":212 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":213 * def __dealloc__(array self): * if self.callback_free_data != NULL: * self.callback_free_data(self.data) # <<<<<<<<<<<<<< * elif self.free_data: * if self.dtype_is_object: */ __pyx_v_self->callback_free_data(__pyx_v_self->data); /* "View.MemoryView":212 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif 
self.free_data: */ goto __pyx_L3; } /* "View.MemoryView":214 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ __pyx_t_1 = (__pyx_v_self->free_data != 0); if (__pyx_t_1) { /* "View.MemoryView":215 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":216 * elif self.free_data: * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<< * self._strides, self.ndim, False) * free(self.data) */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0); /* "View.MemoryView":215 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ } /* "View.MemoryView":218 * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) * free(self.data) # <<<<<<<<<<<<<< * PyObject_Free(self._shape) * */ free(__pyx_v_self->data); /* "View.MemoryView":214 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ } __pyx_L3:; /* "View.MemoryView":219 * self._strides, self.ndim, False) * free(self.data) * PyObject_Free(self._shape) # <<<<<<<<<<<<<< * * @property */ PyObject_Free(__pyx_v_self->_shape); /* "View.MemoryView":211 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":222 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":223 * @property * def memview(self): * return self.get_memview() # <<<<<<<<<<<<<< * * @cname('get_memview') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 223, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":222 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* function exit code */ 
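/* Editor's note (not generated code): how this array type is normally
 * reached from user code. A short usage sketch, assuming it is
 * compiled inside a .pyx module; `cvarray` is the documented alias for
 * this class:
 *
 *     from cython.view cimport array as cvarray
 *
 *     a = cvarray(shape=(2, 3), itemsize=sizeof(double), format="d")
 *     cdef double[:, ::1] mv = a  # acquires a buffer via __getbuffer__
 *     mv[0, 0] = 1.5              # writes into a.data
 *
 * The memview property above returns an equivalent view through
 * get_memview() below, requested with ANY_CONTIGUOUS|FORMAT|WRITABLE.
 */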
__pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":226 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) { int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_memview", 0); /* "View.MemoryView":227 * @cname('get_memview') * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<< * return memoryview(self, flags, self.dtype_is_object) * */ __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE); /* "View.MemoryView":228 * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":226 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":230 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* Python wrapper */ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) { 
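/* Editor's note (not generated code): behavioral sketch for this
 * __len__. len() of a cython.view.array is its first dimension,
 * i.e. _shape[0], following Python sequence conventions (illustrative,
 * compiled Cython):
 *
 *     from cython.view cimport array as cvarray
 *     a = cvarray(shape=(4, 2), itemsize=sizeof(double), format="d")
 *     assert len(a) == 4   # _shape[0], not the element count (8)
 */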
Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":231 * * def __len__(self): * return self._shape[0] # <<<<<<<<<<<<<< * * def __getattr__(self, attr): */ __pyx_r = (__pyx_v_self->_shape[0]); goto __pyx_L0; /* "View.MemoryView":230 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":233 * return self._shape[0] * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* Python wrapper */ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getattr__", 0); /* "View.MemoryView":234 * * def __getattr__(self, attr): * return getattr(self.memview, attr) # <<<<<<<<<<<<<< * * def __getitem__(self, item): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 234, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 234, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":233 * return self._shape[0] * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":236 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* Python wrapper */ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = 
NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":237 * * def __getitem__(self, item): * return self.memview[item] # <<<<<<<<<<<<<< * * def __setitem__(self, item, value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 237, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 237, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":236 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":239 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* Python wrapper */ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setitem__", 0); /* "View.MemoryView":240 * * def __setitem__(self, item, value): * self.memview[item] = value # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 240, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":239 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations 
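/* Editor's note (not generated code): __getitem__ and __setitem__ above
 * do no indexing of their own; they delegate to self.memview, so array
 * indexing has exactly memoryview semantics. Sketch (compiled Cython):
 *
 *     from cython.view cimport array as cvarray
 *     a = cvarray(shape=(2, 3), itemsize=sizeof(double), format="d")
 *     a[1, 2] = 3.0          # __setitem__ -> self.memview[1, 2] = 3.0
 *     assert a[1, 2] == 3.0  # __getitem__ -> self.memview[1, 2]
 */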
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ 
__pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":244 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) { struct __pyx_array_obj *__pyx_v_result = 0; struct __pyx_array_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("array_cwrapper", 0); /* "View.MemoryView":248 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ __pyx_t_1 = ((__pyx_v_buf == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":249 * * if buf == NULL: * result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<< * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4); __pyx_t_2 = 0; __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":248 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ goto __pyx_L3; } /* "View.MemoryView":251 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ /*else*/ { __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 251, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_3 = 0; /* "View.MemoryView":252 * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) # <<<<<<<<<<<<<< * result.data = buf * */ __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 252, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 252, __pyx_L1_error) /* "View.MemoryView":251 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":253 * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) * result.data = buf # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->data = __pyx_v_buf; } __pyx_L3:; /* "View.MemoryView":255 * result.data = buf * * return result # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":244 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":281 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* Python wrapper */ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_name = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0}; PyObject* values[1] = {0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { if 
(unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 281, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); } __pyx_v_name = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 281, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__", 0); /* "View.MemoryView":282 * cdef object name * def __init__(self, name): * self.name = name # <<<<<<<<<<<<<< * def __repr__(self): * return self.name */ __Pyx_INCREF(__pyx_v_name); __Pyx_GIVEREF(__pyx_v_name); __Pyx_GOTREF(__pyx_v_self->name); __Pyx_DECREF(__pyx_v_self->name); __pyx_v_self->name = __pyx_v_name; /* "View.MemoryView":281 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* function exit code */ __pyx_r = 0; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":283 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* Python wrapper */ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":284 * self.name = name * def __repr__(self): * return self.name # <<<<<<<<<<<<<< * * cdef generic = Enum("<strided and direct or indirect>") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->name); __pyx_r = __pyx_v_self->name; goto __pyx_L0; /* "View.MemoryView":283 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef tuple state * cdef object _dict */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = 
__pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_v_state = 0; PyObject *__pyx_v__dict = 0; int __pyx_v_use_setstate; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":5 * cdef object _dict * cdef bint use_setstate * state = (self.name,) # <<<<<<<<<<<<<< * _dict = getattr(self, '__dict__', None) * if _dict is not None: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_self->name); __Pyx_GIVEREF(__pyx_v_self->name); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name); __pyx_v_state = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":6 * cdef bint use_setstate * state = (self.name,) * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<< * if _dict is not None: * state += (_dict,) */ __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v__dict = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":7 * state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ __pyx_t_2 = (__pyx_v__dict != Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "(tree fragment)":8 * _dict = getattr(self, '__dict__', None) * if _dict is not None: * state += (_dict,) # <<<<<<<<<<<<<< * use_setstate = True * else: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v__dict); __Pyx_GIVEREF(__pyx_v__dict); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict); __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4)); __pyx_t_4 = 0; /* "(tree fragment)":9 * if _dict is not None: * state += (_dict,) * use_setstate = True # <<<<<<<<<<<<<< * else: * use_setstate = self.name is not None */ __pyx_v_use_setstate = 1; /* "(tree fragment)":7 * state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ goto __pyx_L3; } /* "(tree fragment)":11 * use_setstate = True * else: * use_setstate = self.name is not None # <<<<<<<<<<<<<< * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state */ /*else*/ { __pyx_t_3 = (__pyx_v_self->name != Py_None); __pyx_v_use_setstate = __pyx_t_3; } __pyx_L3:; /* "(tree fragment)":12 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ __pyx_t_3 = (__pyx_v_use_setstate != 0); if (__pyx_t_3) { /* "(tree fragment)":13 * use_setstate = self.name is not None * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<< * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) */ 
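/* Builds the reduce tuple (__pyx_unpickle_Enum, (type(self), 0xb068931, None), state): 0xb068931 (184977713) is the member-layout checksum Cython derives for this type, which __pyx_unpickle_Enum verifies before restoring state via __setstate_cython__. */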
__Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None); __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state); __pyx_t_4 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "(tree fragment)":12 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ } /* "(tree fragment)":15 * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1); __pyx_t_5 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef tuple state * cdef object _dict */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_state); __Pyx_XDECREF(__pyx_v__dict); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":16 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ 
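/* __setstate_cython__ simply forwards the state tuple to __pyx_unpickle_Enum__set_state(), which restores self.name (and self.__dict__ when the state tuple carried one). */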
static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":17 * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<< */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error) __pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":16 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":298 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) { Py_intptr_t __pyx_v_aligned_p; size_t __pyx_v_offset; void *__pyx_r; int __pyx_t_1; /* "View.MemoryView":300 * cdef void *align_pointer(void *memory, size_t alignment) nogil: * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<< * cdef size_t offset * */ __pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory); /* "View.MemoryView":304 * * with cython.cdivision(True): * offset = aligned_p % alignment # <<<<<<<<<<<<<< * * if offset > 0: */ __pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment); /* "View.MemoryView":306 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ __pyx_t_1 = ((__pyx_v_offset > 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":307 * * if offset > 0: * aligned_p += alignment - offset # <<<<<<<<<<<<<< * * return <void *> aligned_p */ __pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset)); /* "View.MemoryView":306 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ } /* "View.MemoryView":309 * aligned_p += alignment - offset * * return <void *> aligned_p # <<<<<<<<<<<<<< * * */ __pyx_r = 
((void *)__pyx_v_aligned_p); goto __pyx_L0; /* "View.MemoryView":298 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":345 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* Python wrapper */ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_obj = 0; int __pyx_v_flags; int __pyx_v_dtype_is_object; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 345, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 345, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_obj = values[0]; __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) if (values[2]) { __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) } else { __pyx_v_dtype_is_object = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 345, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, 
__pyx_v_dtype_is_object); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__cinit__", 0); /* "View.MemoryView":346 * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj # <<<<<<<<<<<<<< * self.flags = flags * if type(self) is memoryview or obj is not None: */ __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); __Pyx_GOTREF(__pyx_v_self->obj); __Pyx_DECREF(__pyx_v_self->obj); __pyx_v_self->obj = __pyx_v_obj; /* "View.MemoryView":347 * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj * self.flags = flags # <<<<<<<<<<<<<< * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) */ __pyx_v_self->flags = __pyx_v_flags; /* "View.MemoryView":348 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type)); __pyx_t_3 = (__pyx_t_2 != 0); if (!__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } __pyx_t_3 = (__pyx_v_obj != Py_None); __pyx_t_2 = (__pyx_t_3 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "View.MemoryView":349 * self.flags = flags * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<< * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None */ __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 349, __pyx_L1_error) /* "View.MemoryView":350 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":351 * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None; /* "View.MemoryView":352 * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * global __pyx_memoryview_thread_locks_used */ Py_INCREF(Py_None); /* "View.MemoryView":350 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ } /* "View.MemoryView":348 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ } /* "View.MemoryView":355 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < 
THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ __pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0); if (__pyx_t_1) { /* "View.MemoryView":356 * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: */ __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); /* "View.MemoryView":357 * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<< * if self.lock is NULL: * self.lock = PyThread_allocate_lock() */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1); /* "View.MemoryView":355 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ } /* "View.MemoryView":358 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":359 * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<< * if self.lock is NULL: * raise MemoryError */ __pyx_v_self->lock = PyThread_allocate_lock(); /* "View.MemoryView":360 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":361 * self.lock = PyThread_allocate_lock() * if self.lock is NULL: * raise MemoryError # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ PyErr_NoMemory(); __PYX_ERR(1, 361, __pyx_L1_error) /* "View.MemoryView":360 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ } /* "View.MemoryView":358 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ } /* "View.MemoryView":363 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":364 * * if flags & PyBUF_FORMAT: * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<< * else: * self.dtype_is_object = dtype_is_object */ __pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L11_bool_binop_done; } __pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_self->dtype_is_object = __pyx_t_1; /* "View.MemoryView":363 * raise MemoryError * * if flags & 
PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ goto __pyx_L10; } /* "View.MemoryView":366 * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<< * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( */ /*else*/ { __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object; } __pyx_L10:; /* "View.MemoryView":368 * self.dtype_is_object = dtype_is_object * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<< * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL */ __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int)))); /* "View.MemoryView":370 * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL # <<<<<<<<<<<<<< * * def __dealloc__(memoryview self): */ __pyx_v_self->typeinfo = NULL; /* "View.MemoryView":345 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":372 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* Python wrapper */ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { int __pyx_v_i; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyThread_type_lock __pyx_t_6; PyThread_type_lock __pyx_t_7; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":373 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: */ __pyx_t_1 = (__pyx_v_self->obj != Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":374 * def __dealloc__(memoryview self): * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< * elif (<__pyx_buffer *> &self.view).obj == Py_None: * */ __Pyx_ReleaseBuffer((&__pyx_v_self->view)); /* "View.MemoryView":373 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: */ goto __pyx_L3; } /* "View.MemoryView":375 * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< * * (<__pyx_buffer *> &self.view).obj = NULL */ __pyx_t_2 = ((((Py_buffer 
*)(&__pyx_v_self->view))->obj == Py_None) != 0); if (__pyx_t_2) { /* "View.MemoryView":377 * elif (<__pyx_buffer *> &self.view).obj == Py_None: * * (<__pyx_buffer *> &self.view).obj = NULL # <<<<<<<<<<<<<< * Py_DECREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_self->view))->obj = NULL; /* "View.MemoryView":378 * * (<__pyx_buffer *> &self.view).obj = NULL * Py_DECREF(Py_None) # <<<<<<<<<<<<<< * * cdef int i */ Py_DECREF(Py_None); /* "View.MemoryView":375 * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< * * (<__pyx_buffer *> &self.view).obj = NULL */ } __pyx_L3:; /* "View.MemoryView":382 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":383 * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<< * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 */ __pyx_t_3 = __pyx_memoryview_thread_locks_used; __pyx_t_4 = __pyx_t_3; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":384 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ __pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0); if (__pyx_t_2) { /* "View.MemoryView":385 * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<< * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1); /* "View.MemoryView":386 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ __pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0); if (__pyx_t_2) { /* "View.MemoryView":388 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<< * break * else: */ __pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); __pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]); /* "View.MemoryView":387 * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break */ (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6; 
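/* second half of the swap: park the lock being released in the now-unused tail slot, so the first __pyx_memoryview_thread_locks_used entries of the preallocated pool remain the live ones */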
(__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7; /* "View.MemoryView":386 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ } /* "View.MemoryView":389 * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break # <<<<<<<<<<<<<< * else: * PyThread_free_lock(self.lock) */ goto __pyx_L6_break; /* "View.MemoryView":384 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ } } /*else*/ { /* "View.MemoryView":391 * break * else: * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< * * cdef char *get_item_pointer(memoryview self, object index) except NULL: */ PyThread_free_lock(__pyx_v_self->lock); } __pyx_L6_break:; /* "View.MemoryView":382 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ } /* "View.MemoryView":372 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":393 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { Py_ssize_t __pyx_v_dim; char *__pyx_v_itemp; PyObject *__pyx_v_idx = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t __pyx_t_3; PyObject *(*__pyx_t_4)(PyObject *); PyObject *__pyx_t_5 = NULL; Py_ssize_t __pyx_t_6; char *__pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_item_pointer", 0); /* "View.MemoryView":395 * cdef char *get_item_pointer(memoryview self, object index) except NULL: * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<< * * for dim, idx in enumerate(index): */ __pyx_v_itemp = ((char *)__pyx_v_self->view.buf); /* "View.MemoryView":397 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ __pyx_t_1 = 0; if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) { __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; __pyx_t_4 = NULL; } else { __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 397, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_4)) { if (likely(PyList_CheckExact(__pyx_t_2))) { if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) 
break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } else { if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } } else { __pyx_t_5 = __pyx_t_4(__pyx_t_2); if (unlikely(!__pyx_t_5)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 397, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_5); } __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_1; __pyx_t_1 = (__pyx_t_1 + 1); /* "View.MemoryView":398 * * for dim, idx in enumerate(index): * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<< * * return itemp */ __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 398, __pyx_L1_error) __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 398, __pyx_L1_error) __pyx_v_itemp = __pyx_t_7; /* "View.MemoryView":397 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":400 * itemp = pybuffer_index(&self.view, itemp, idx, dim) * * return itemp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_itemp; goto __pyx_L0; /* "View.MemoryView":393 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_idx); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":403 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* Python wrapper */ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_v_have_slices = NULL; PyObject 
*__pyx_v_indices = NULL; char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; char *__pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":404 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":405 * def __getitem__(memoryview self, object index): * if index is Ellipsis: * return self # <<<<<<<<<<<<<< * * have_slices, indices = _unellipsify(index, self.view.ndim) */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __pyx_r = ((PyObject *)__pyx_v_self); goto __pyx_L0; /* "View.MemoryView":404 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ } /* "View.MemoryView":407 * return self * * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * cdef char *itemp */ __pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (likely(__pyx_t_3 != Py_None)) { PyObject* sequence = __pyx_t_3; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 407, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); #else __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 407, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_4; __pyx_t_4 = 0; __pyx_v_indices = __pyx_t_5; __pyx_t_5 = 0; /* "View.MemoryView":410 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 410, __pyx_L1_error) if (__pyx_t_2) { /* "View.MemoryView":411 * cdef char *itemp * if have_slices: * return memview_slice(self, indices) # <<<<<<<<<<<<<< * else: * itemp = self.get_item_pointer(indices) */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 411, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":410 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ } /* "View.MemoryView":413 * return memview_slice(self, indices) * else: * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<< * return self.convert_item_to_object(itemp) * */ /*else*/ { __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(1, 413, __pyx_L1_error) __pyx_v_itemp = __pyx_t_6; /* 
"View.MemoryView":414 * else: * itemp = self.get_item_pointer(indices) * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<< * * def __setitem__(memoryview self, object index, object value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 414, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":403 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_indices); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":416 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") */ /* Python wrapper */ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_obj = NULL; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setitem__", 0); __Pyx_INCREF(__pyx_v_index); /* "View.MemoryView":417 * * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: # <<<<<<<<<<<<<< * raise TypeError("Cannot assign to read-only memoryview") * */ __pyx_t_1 = (__pyx_v_self->view.readonly != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":418 * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< * * have_slices, index = _unellipsify(index, self.view.ndim) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 418, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 418, __pyx_L1_error) /* "View.MemoryView":417 * * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: # <<<<<<<<<<<<<< * raise TypeError("Cannot assign to read-only memoryview") * */ } /* "View.MemoryView":420 * raise TypeError("Cannot assign to read-only memoryview") * * have_slices, index = 
_unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * if have_slices: */ __pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (likely(__pyx_t_2 != Py_None)) { PyObject* sequence = __pyx_t_2; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 420, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 420, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_3; __pyx_t_3 = 0; __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":422 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 422, __pyx_L1_error) if (__pyx_t_1) { /* "View.MemoryView":423 * * if have_slices: * obj = self.is_slice(value) # <<<<<<<<<<<<<< * if obj: * self.setitem_slice_assignment(self[index], obj) */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 423, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_obj = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":424 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 424, __pyx_L1_error) if (__pyx_t_1) { /* "View.MemoryView":425 * obj = self.is_slice(value) * if obj: * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<< * else: * self.setitem_slice_assign_scalar(self[index], value) */ __pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":424 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ goto __pyx_L5; } /* "View.MemoryView":427 * self.setitem_slice_assignment(self[index], obj) * else: * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<< * else: * self.setitem_indexed(index, value) */ /*else*/ { __pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 427, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(1, 427, __pyx_L1_error) 
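/* value is not itself buffer-like, so broadcast it as a scalar over every element of the sliced destination view */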
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 427, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L5:; /* "View.MemoryView":422 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ goto __pyx_L4; } /* "View.MemoryView":429 * self.setitem_slice_assign_scalar(self[index], value) * else: * self.setitem_indexed(index, value) # <<<<<<<<<<<<<< * * cdef is_slice(self, obj): */ /*else*/ { __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 429, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L4:; /* "View.MemoryView":416 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_obj); __Pyx_XDECREF(__pyx_v_index); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":431 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_slice", 0); __Pyx_INCREF(__pyx_v_obj); /* "View.MemoryView":432 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":433 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "View.MemoryView":434 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":435 * try: * obj = memoryview(obj, self.flags & 
~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) # <<<<<<<<<<<<<< * except TypeError: * return None */ __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 435, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); /* "View.MemoryView":434 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":433 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L9_try_end; __pyx_L4_error:; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; /* "View.MemoryView":436 * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) * except TypeError: # <<<<<<<<<<<<<< * return None * */ __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); if (__pyx_t_9) { __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 436, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GOTREF(__pyx_t_8); __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":437 * self.dtype_is_object) * except TypeError: * return None # <<<<<<<<<<<<<< * * return obj */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L7_except_return; } goto __pyx_L6_except_error; __pyx_L6_except_error:; /* "View.MemoryView":433 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L7_except_return:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L0; __pyx_L9_try_end:; } /* "View.MemoryView":432 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, */ } /* "View.MemoryView":439 * return None * * return obj # <<<<<<<<<<<<<< * * cdef setitem_slice_assignment(self, dst, src): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_obj); __pyx_r = __pyx_v_obj; goto 
__pyx_L0; /* "View.MemoryView":431 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_obj); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":441 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) { __Pyx_memviewslice __pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_src_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; __Pyx_memviewslice *__pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setitem_slice_assignment", 0); /* "View.MemoryView":445 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 445, __pyx_L1_error) __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 445, __pyx_L1_error) /* "View.MemoryView":446 * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<< * src.ndim, dst.ndim, self.dtype_is_object) * */ if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 446, __pyx_L1_error) __pyx_t_2 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice)); if (unlikely(__pyx_t_2 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 446, __pyx_L1_error) /* "View.MemoryView":447 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<< * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":445 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, 
dst.ndim, self.dtype_is_object) */ __pyx_t_6 = __pyx_memoryview_copy_contents((__pyx_t_1[0]), (__pyx_t_2[0]), __pyx_t_4, __pyx_t_5, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 445, __pyx_L1_error) /* "View.MemoryView":441 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":449 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) { int __pyx_v_array[0x80]; void *__pyx_v_tmp; void *__pyx_v_item; __Pyx_memviewslice *__pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_tmp_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; char const *__pyx_t_6; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; PyObject *__pyx_t_12 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0); /* "View.MemoryView":451 * cdef setitem_slice_assign_scalar(self, memoryview dst, value): * cdef int array[128] * cdef void *tmp = NULL # <<<<<<<<<<<<<< * cdef void *item * */ __pyx_v_tmp = NULL; /* "View.MemoryView":456 * cdef __Pyx_memviewslice *dst_slice * cdef __Pyx_memviewslice tmp_slice * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<< * * if <size_t>self.view.itemsize > sizeof(array): */ __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 456, __pyx_L1_error) __pyx_v_dst_slice = __pyx_t_1; /* "View.MemoryView":458 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ __pyx_t_2 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0); if (__pyx_t_2) { /* "View.MemoryView":459 * * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<< * if tmp == NULL: * raise MemoryError */ __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize); /* "View.MemoryView":460 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ __pyx_t_2 = ((__pyx_v_tmp == NULL) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":461 * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: * raise MemoryError # <<<<<<<<<<<<<< * item = tmp * else: */ PyErr_NoMemory(); __PYX_ERR(1, 461, __pyx_L1_error) /* "View.MemoryView":460 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * 
raise MemoryError * item = tmp */ } /* "View.MemoryView":462 * if tmp == NULL: * raise MemoryError * item = tmp # <<<<<<<<<<<<<< * else: * item = <void *> array */ __pyx_v_item = __pyx_v_tmp; /* "View.MemoryView":458 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ goto __pyx_L3; } /* "View.MemoryView":464 * item = tmp * else: * item = <void *> array # <<<<<<<<<<<<<< * * try: */ /*else*/ { __pyx_v_item = ((void *)__pyx_v_array); } __pyx_L3:; /* "View.MemoryView":466 * item = <void *> array * * try: # <<<<<<<<<<<<<< * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value */ /*try:*/ { /* "View.MemoryView":467 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ __pyx_t_2 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_2) { /* "View.MemoryView":468 * try: * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<< * else: * self.assign_item_from_object(<char *> item, value) */ (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); /* "View.MemoryView":467 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ goto __pyx_L8; } /* "View.MemoryView":470 * (<PyObject **> item)[0] = <PyObject *> value * else: * self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<< * * */ /*else*/ { __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 470, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L8:; /* "View.MemoryView":474 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ __pyx_t_2 = ((__pyx_v_self->view.suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":475 * * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<< * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, * item, self.dtype_is_object) */ __pyx_t_3 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 475, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":474 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ } /* "View.MemoryView":476 * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<< * item, self.dtype_is_object) * finally: */ __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object); } /* "View.MemoryView":479 * item, self.dtype_is_object) * finally: * PyMem_Free(tmp) # <<<<<<<<<<<<<< * * cdef setitem_indexed(self, index, value): */ /*finally:*/ { /*normal exit:*/{ PyMem_Free(__pyx_v_tmp); goto __pyx_L7; } __pyx_L6_error:; /*exception exit:*/{ __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __pyx_t_7 = 0; __pyx_t_8 = 0; 
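/* finally-clause exception exit: the in-flight exception (type, value,
 * traceback, plus the outer exception state on Python 3) is parked in
 * temporaries so that PyMem_Free(tmp) can still run, after which the
 * exception and the saved source position are restored and control
 * jumps to the function's error label. */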
__pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9); __Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_9); __Pyx_XGOTREF(__pyx_t_10); __Pyx_XGOTREF(__pyx_t_11); __Pyx_XGOTREF(__pyx_t_12); __pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename; { PyMem_Free(__pyx_v_tmp); } if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_XGIVEREF(__pyx_t_12); __Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12); } __Pyx_XGIVEREF(__pyx_t_7); __Pyx_XGIVEREF(__pyx_t_8); __Pyx_XGIVEREF(__pyx_t_9); __Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9); __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6; goto __pyx_L1_error; } __pyx_L7:; } /* "View.MemoryView":449 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":481 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations char *__pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setitem_indexed", 0); /* "View.MemoryView":482 * * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<< * self.assign_item_from_object(itemp, value) * */ __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(1, 482, __pyx_L1_error) __pyx_v_itemp = __pyx_t_1; /* "View.MemoryView":483 * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":481 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); 
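/* Standard generated error epilogue: live temporaries are released, a
 * traceback frame naming the Cython-level function is appended, and 0
 * (NULL) is returned so the pending exception propagates to the caller. */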
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":485 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_v_struct = NULL; PyObject *__pyx_v_bytesitem = 0; PyObject *__pyx_v_result = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_t_8; PyObject *__pyx_t_9 = NULL; size_t __pyx_t_10; int __pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":488 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef bytes bytesitem * */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 488, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":491 * cdef bytes bytesitem * * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<< * try: * result = struct.unpack(self.view.format, bytesitem) */ __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 491, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_bytesitem = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":492 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "View.MemoryView":493 * bytesitem = itemp[:self.view.itemsize] * try: * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<< * except struct.error: * raise ValueError("Unable to convert item to object") */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else 
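/* Call dispatch for struct.unpack: under CYTHON_FAST_PYCALL and
 * CYTHON_FAST_PYCCALL, plain Python functions and builtin C functions are
 * invoked through CPython's fast-call protocol without building an
 * argument tuple; the generic PyTuple_New + __Pyx_PyObject_Call path
 * below is the portable fallback. */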
#endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; } __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6); __Pyx_INCREF(__pyx_v_bytesitem); __Pyx_GIVEREF(__pyx_v_bytesitem); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem); __pyx_t_6 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":492 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ } /* "View.MemoryView":497 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ /*else:*/ { __pyx_t_10 = strlen(__pyx_v_self->view.format); __pyx_t_11 = ((__pyx_t_10 == 1) != 0); if (__pyx_t_11) { /* "View.MemoryView":498 * else: * if len(self.view.format) == 1: * return result[0] # <<<<<<<<<<<<<< * return result * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 498, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L6_except_return; /* "View.MemoryView":497 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ } /* "View.MemoryView":499 * if len(self.view.format) == 1: * return result[0] * return result # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_result); __pyx_r = __pyx_v_result; goto __pyx_L6_except_return; } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "View.MemoryView":494 * try: * result = struct.unpack(self.view.format, bytesitem) * except struct.error: # <<<<<<<<<<<<<< * raise ValueError("Unable to convert item to object") * else: */ __Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 494, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9); __pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0; if (__pyx_t_8) { __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(1, 494, __pyx_L5_except_error) 
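/* Entry into the `except struct.error:` handler: the raised exception was
 * matched against struct.error above and has now been fetched into
 * __pyx_t_9/__pyx_t_5/__pyx_t_1 so that the ValueError raised below can
 * take its place. */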
__Pyx_GOTREF(__pyx_t_9); __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_1); /* "View.MemoryView":495 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 495, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(1, 495, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "View.MemoryView":492 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L6_except_return:; __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; } /* "View.MemoryView":485 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesitem); __Pyx_XDECREF(__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":501 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_v_struct = NULL; char __pyx_v_c; PyObject *__pyx_v_bytesvalue = 0; Py_ssize_t __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; Py_ssize_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; char *__pyx_t_11; char *__pyx_t_12; char *__pyx_t_13; char *__pyx_t_14; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":504 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef char c * cdef bytes bytesvalue */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 504, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":509 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ __pyx_t_2 = PyTuple_Check(__pyx_v_value); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "View.MemoryView":510 * * if isinstance(value, tuple): 
* bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<< * else: * bytesvalue = struct.pack(self.view.format, value) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 510, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":509 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ goto __pyx_L3; } /* "View.MemoryView":512 * bytesvalue = struct.pack(self.view.format, *value) * else: * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<< * * for i, c in enumerate(bytesvalue): */ /*else*/ { __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = NULL; __pyx_t_7 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); __pyx_t_7 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_GIVEREF(__pyx_t_1); 
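/* __Pyx_GIVEREF above donates our reference to the packed format bytes;
 * PyTuple_SET_ITEM below stores it (and then `value`) into the freshly
 * built argument tuple, stealing references rather than incrementing
 * them. */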
PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1); __Pyx_INCREF(__pyx_v_value); __Pyx_GIVEREF(__pyx_v_value); PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 512, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "View.MemoryView":514 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = 0; if (unlikely(__pyx_v_bytesvalue == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable"); __PYX_ERR(1, 514, __pyx_L1_error) } __Pyx_INCREF(__pyx_v_bytesvalue); __pyx_t_10 = __pyx_v_bytesvalue; __pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10); __pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10)); for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) { __pyx_t_11 = __pyx_t_14; __pyx_v_c = (__pyx_t_11[0]); /* "View.MemoryView":515 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ __pyx_v_i = __pyx_t_9; /* "View.MemoryView":514 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = (__pyx_t_9 + 1); /* "View.MemoryView":515 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c; } __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; /* "View.MemoryView":501 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesvalue); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":518 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int 
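/* __getbuffer__ implementation: re-exports this memoryview's Py_buffer,
 * filling only the fields requested by the PyBUF_* flags and rejecting
 * writable requests on a read-only view. */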
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; char *__pyx_t_5; void *__pyx_t_6; int __pyx_t_7; Py_ssize_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; if (__pyx_v_info == NULL) { PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); return -1; } __Pyx_RefNannySetupContext("__getbuffer__", 0); __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); /* "View.MemoryView":519 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< * raise ValueError("Cannot create writable memory view from read-only memoryview") * */ __pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = (__pyx_v_self->view.readonly != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (unlikely(__pyx_t_1)) { /* "View.MemoryView":520 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< * * if flags & PyBUF_ND: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 520, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 520, __pyx_L1_error) /* "View.MemoryView":519 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< * raise ValueError("Cannot create writable memory view from read-only memoryview") * */ } /* "View.MemoryView":522 * raise ValueError("Cannot create writable memory view from read-only memoryview") * * if flags & PyBUF_ND: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0); if (__pyx_t_1) { /* "View.MemoryView":523 * * if flags & PyBUF_ND: * info.shape = self.view.shape # <<<<<<<<<<<<<< * else: * info.shape = NULL */ __pyx_t_4 = __pyx_v_self->view.shape; __pyx_v_info->shape = __pyx_t_4; /* "View.MemoryView":522 * raise ValueError("Cannot create writable memory view from read-only memoryview") * * if flags & PyBUF_ND: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ goto __pyx_L6; } /* "View.MemoryView":525 * info.shape = self.view.shape * else: * info.shape = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_STRIDES: */ /*else*/ { __pyx_v_info->shape = NULL; } __pyx_L6:; /* "View.MemoryView":527 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); if (__pyx_t_1) { /* "View.MemoryView":528 * * if flags & PyBUF_STRIDES: * info.strides = self.view.strides # <<<<<<<<<<<<<< * else: * info.strides = NULL */ __pyx_t_4 = __pyx_v_self->view.strides; __pyx_v_info->strides = __pyx_t_4; /* "View.MemoryView":527 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ goto __pyx_L7; } /* "View.MemoryView":530 * info.strides = self.view.strides * else: * info.strides = NULL # <<<<<<<<<<<<<< * 
* if flags & PyBUF_INDIRECT: */ /*else*/ { __pyx_v_info->strides = NULL; } __pyx_L7:; /* "View.MemoryView":532 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0); if (__pyx_t_1) { /* "View.MemoryView":533 * * if flags & PyBUF_INDIRECT: * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<< * else: * info.suboffsets = NULL */ __pyx_t_4 = __pyx_v_self->view.suboffsets; __pyx_v_info->suboffsets = __pyx_t_4; /* "View.MemoryView":532 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ goto __pyx_L8; } /* "View.MemoryView":535 * info.suboffsets = self.view.suboffsets * else: * info.suboffsets = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ /*else*/ { __pyx_v_info->suboffsets = NULL; } __pyx_L8:; /* "View.MemoryView":537 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":538 * * if flags & PyBUF_FORMAT: * info.format = self.view.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_5 = __pyx_v_self->view.format; __pyx_v_info->format = __pyx_t_5; /* "View.MemoryView":537 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ goto __pyx_L9; } /* "View.MemoryView":540 * info.format = self.view.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.buf = self.view.buf */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L9:; /* "View.MemoryView":542 * info.format = NULL * * info.buf = self.view.buf # <<<<<<<<<<<<<< * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize */ __pyx_t_6 = __pyx_v_self->view.buf; __pyx_v_info->buf = __pyx_t_6; /* "View.MemoryView":543 * * info.buf = self.view.buf * info.ndim = self.view.ndim # <<<<<<<<<<<<<< * info.itemsize = self.view.itemsize * info.len = self.view.len */ __pyx_t_7 = __pyx_v_self->view.ndim; __pyx_v_info->ndim = __pyx_t_7; /* "View.MemoryView":544 * info.buf = self.view.buf * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<< * info.len = self.view.len * info.readonly = self.view.readonly */ __pyx_t_8 = __pyx_v_self->view.itemsize; __pyx_v_info->itemsize = __pyx_t_8; /* "View.MemoryView":545 * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize * info.len = self.view.len # <<<<<<<<<<<<<< * info.readonly = self.view.readonly * info.obj = self */ __pyx_t_8 = __pyx_v_self->view.len; __pyx_v_info->len = __pyx_t_8; /* "View.MemoryView":546 * info.itemsize = self.view.itemsize * info.len = self.view.len * info.readonly = self.view.readonly # <<<<<<<<<<<<<< * info.obj = self * */ __pyx_t_1 = __pyx_v_self->view.readonly; __pyx_v_info->readonly = __pyx_t_1; /* "View.MemoryView":547 * info.len = self.view.len * info.readonly = self.view.readonly * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":518 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable 
memory view from read-only memoryview") */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info->obj == Py_None) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":553 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":554 * @property * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<< * transpose_memslice(&result.from_slice) * return result */ __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 554, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 554, __pyx_L1_error) __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":555 * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<< * return result * */ __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 555, __pyx_L1_error) /* "View.MemoryView":556 * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) * return result # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":553 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":559 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static 
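/* The property getters that follow (base, shape, strides, suboffsets,
 * ndim, itemsize, nbytes, size) each consist of a Python-level wrapper
 * that casts self and delegates to the corresponding __get__
 * implementation. */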
PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":560 * @property * def base(self): * return self.obj # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->obj); __pyx_r = __pyx_v_self->obj; goto __pyx_L0; /* "View.MemoryView":559 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":563 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_length; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":564 * @property * def shape(self): * return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 564, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { __pyx_t_2 = __pyx_t_4; __pyx_v_length = (__pyx_t_2[0]); __pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 564, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":563 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; 
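/* shape, strides and suboffsets follow the same pattern: walk the C
 * Py_ssize_t array in self.view up to view.ndim entries, box each value
 * with PyInt_FromSsize_t into a list, then freeze it with
 * PyList_AsTuple. */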
__Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":567 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_stride; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":568 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ __pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":570 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 570, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 570, __pyx_L1_error) /* "View.MemoryView":568 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ } /* "View.MemoryView":572 * raise ValueError("Buffer view does not expose strides") * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_v_stride = (__pyx_t_3[0]); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "View.MemoryView":567 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return 
__pyx_r; } /* "View.MemoryView":575 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; Py_ssize_t *__pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":576 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":577 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_tuple__12, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":576 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ } /* "View.MemoryView":579 * return (-1,) * self.view.ndim * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim); for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) { __pyx_t_4 = __pyx_t_6; __pyx_v_suboffset = (__pyx_t_4[0]); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":575 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, 
__pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":582 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":583 * @property * def ndim(self): * return self.view.ndim # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 583, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":582 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":586 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":587 * @property * def itemsize(self): * return self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 587, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":586 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":590 * * @property * def 
nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":591 * @property * def nbytes(self): * return self.size * self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 591, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 591, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 591, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":590 * * @property * def nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":594 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_v_result = NULL; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":595 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ __pyx_t_1 = (__pyx_v_self->_size == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":596 * def 
size(self): * if self._size is None: * result = 1 # <<<<<<<<<<<<<< * * for length in self.view.shape[:self.view.ndim]: */ __Pyx_INCREF(__pyx_int_1); __pyx_v_result = __pyx_int_1; /* "View.MemoryView":598 * result = 1 * * for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<< * result *= length * */ __pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 598, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6); __pyx_t_6 = 0; /* "View.MemoryView":599 * * for length in self.view.shape[:self.view.ndim]: * result *= length # <<<<<<<<<<<<<< * * self._size = result */ __pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 599, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6); __pyx_t_6 = 0; } /* "View.MemoryView":601 * result *= length * * self._size = result # <<<<<<<<<<<<<< * * return self._size */ __Pyx_INCREF(__pyx_v_result); __Pyx_GIVEREF(__pyx_v_result); __Pyx_GOTREF(__pyx_v_self->_size); __Pyx_DECREF(__pyx_v_self->_size); __pyx_v_self->_size = __pyx_v_result; /* "View.MemoryView":595 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ } /* "View.MemoryView":603 * self._size = result * * return self._size # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->_size); __pyx_r = __pyx_v_self->_size; goto __pyx_L0; /* "View.MemoryView":594 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":605 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* Python wrapper */ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":606 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ __pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":607 * def __len__(self): * if self.view.ndim >= 1: * return self.view.shape[0] # <<<<<<<<<<<<<< * * return 0 */ __pyx_r = (__pyx_v_self->view.shape[0]); goto __pyx_L0; /* "View.MemoryView":606 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ } /* "View.MemoryView":609 * return self.view.shape[0] * * return 0 # <<<<<<<<<<<<<< * * def 
__repr__(self): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":605 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":611 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* Python wrapper */ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":612 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":613 * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) # <<<<<<<<<<<<<< * * def __str__(self): */ __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 613, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); /* "View.MemoryView":612 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":611 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return 
__pyx_r; } /* "View.MemoryView":615 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* Python wrapper */ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__str__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__str__", 0); /* "View.MemoryView":616 * * def __str__(self): * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":615 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":619 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; 
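/* is_c_contig below asks get_slice_from_memview for a full slice and then
 * tests it with slice_is_contig(..., 'C', ndim). The invariant being
 * checked is that the strides describe one dense C-ordered block; a
 * sketch of that test (conceptual only, not Cython's exact helper, which
 * is __pyx_memviewslice_is_contig):
 *
 *     static int is_contig_c_sketch(const Py_ssize_t *shape,
 *                                   const Py_ssize_t *strides,
 *                                   int ndim, Py_ssize_t itemsize) {
 *         Py_ssize_t expected = itemsize;
 *         for (int i = ndim - 1; i >= 0; i--) {   // innermost dim first
 *             if (shape[i] != 1 && strides[i] != expected) return 0;
 *             expected *= shape[i];
 *         }
 *         return 1;
 *     }
 */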
__Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_c_contig", 0); /* "View.MemoryView":622 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'C', self.view.ndim) * */ __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 622, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":623 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<< * * def is_f_contig(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 623, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":619 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":625 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_f_contig", 0); /* "View.MemoryView":628 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'F', self.view.ndim) * */ __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 628, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":629 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<< * * def copy(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 629, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); 
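/* The Fortran-order test mirrors the C-order one: the expected stride
 * starts at itemsize on dimension 0 and is multiplied by each extent
 * while walking towards the last dimension. Conceptually:
 *
 *     Py_ssize_t expected = itemsize;
 *     for (int i = 0; i < ndim; i++) {
 *         if (shape[i] != 1 && strides[i] != expected) return 0;
 *         expected *= shape[i];
 *     }
 *     return 1;
 *
 * (sketch only; the generated code delegates to
 * __pyx_memviewslice_is_contig with order 'F'). */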
__pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":625 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":631 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_mslice; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("copy", 0); /* "View.MemoryView":633 * def copy(self): * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &mslice) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS)); /* "View.MemoryView":635 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS * * slice_copy(self, &mslice) # <<<<<<<<<<<<<< * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice)); /* "View.MemoryView":636 * * slice_copy(self, &mslice) * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_C_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 636, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":641 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<< * * def copy_fortran(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 641, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":631 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":643 * 
return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("copy_fortran", 0); /* "View.MemoryView":645 * def copy_fortran(self): * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &src) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); /* "View.MemoryView":647 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS * * slice_copy(self, &src) # <<<<<<<<<<<<<< * dst = slice_copy_contig(&src, "fortran", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src)); /* "View.MemoryView":648 * * slice_copy(self, &src) * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_F_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 648, __pyx_L1_error) __pyx_v_dst = __pyx_t_1; /* "View.MemoryView":653 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 653, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":643 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations 
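/* copy() and copy_fortran() above each mask out the opposing contiguity
 * flag (PyBUF_F_CONTIGUOUS resp. PyBUF_C_CONTIGUOUS), then let
 * slice_copy_contig / __pyx_memoryview_copy_new_contig allocate a fresh
 * dense buffer in 'c' or 'fortran' order and wrap it via
 * memoryview_copy_from_slice. The __reduce_cython__/__setstate_cython__
 * stubs that follow simply raise TypeError: memoryview has a non-trivial
 * __cinit__, so Cython makes pickling fail loudly rather than attempt a
 * default reduce. */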
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to 
non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":657 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { struct __pyx_memoryview_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_cwrapper", 0); /* "View.MemoryView":658 * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< * result.typeinfo = typeinfo * return result */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_o); __Pyx_GIVEREF(__pyx_v_o); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":659 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo # <<<<<<<<<<<<<< * return result * */ __pyx_v_result->typeinfo = __pyx_v_typeinfo; /* "View.MemoryView":660 * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_check') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":657 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":663 * * 
@cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("memoryview_check", 0); /* "View.MemoryView":664 * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): * return isinstance(o, memoryview) # <<<<<<<<<<<<<< * * cdef tuple _unellipsify(object index, int ndim): */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type); __pyx_r = __pyx_t_1; goto __pyx_L0; /* "View.MemoryView":663 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":666 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { PyObject *__pyx_v_tup = NULL; PyObject *__pyx_v_result = NULL; int __pyx_v_have_slices; int __pyx_v_seen_ellipsis; CYTHON_UNUSED PyObject *__pyx_v_idx = NULL; PyObject *__pyx_v_item = NULL; Py_ssize_t __pyx_v_nslices; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; Py_ssize_t __pyx_t_5; PyObject *(*__pyx_t_6)(PyObject *); PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; int __pyx_t_9; int __pyx_t_10; PyObject *__pyx_t_11 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_unellipsify", 0); /* "View.MemoryView":671 * full slices. * """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ __pyx_t_1 = PyTuple_Check(__pyx_v_index); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":672 * """ * if not isinstance(index, tuple): * tup = (index,) # <<<<<<<<<<<<<< * else: * tup = index */ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 672, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_index); __Pyx_GIVEREF(__pyx_v_index); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index); __pyx_v_tup = __pyx_t_3; __pyx_t_3 = 0; /* "View.MemoryView":671 * full slices. 
* """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ goto __pyx_L3; } /* "View.MemoryView":674 * tup = (index,) * else: * tup = index # <<<<<<<<<<<<<< * * result = [] */ /*else*/ { __Pyx_INCREF(__pyx_v_index); __pyx_v_tup = __pyx_v_index; } __pyx_L3:; /* "View.MemoryView":676 * tup = index * * result = [] # <<<<<<<<<<<<<< * have_slices = False * seen_ellipsis = False */ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 676, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_result = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":677 * * result = [] * have_slices = False # <<<<<<<<<<<<<< * seen_ellipsis = False * for idx, item in enumerate(tup): */ __pyx_v_have_slices = 0; /* "View.MemoryView":678 * result = [] * have_slices = False * seen_ellipsis = False # <<<<<<<<<<<<<< * for idx, item in enumerate(tup): * if item is Ellipsis: */ __pyx_v_seen_ellipsis = 0; /* "View.MemoryView":679 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ __Pyx_INCREF(__pyx_int_0); __pyx_t_3 = __pyx_int_0; if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) { __pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; __pyx_t_6 = NULL; } else { __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 679, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_6)) { if (likely(PyList_CheckExact(__pyx_t_4))) { if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } else { if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } } else { __pyx_t_7 = __pyx_t_6(__pyx_t_4); if (unlikely(!__pyx_t_7)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 679, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_7); } __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7); __pyx_t_7 = 0; __Pyx_INCREF(__pyx_t_3); __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3); __pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = __pyx_t_7; __pyx_t_7 = 0; /* "View.MemoryView":680 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":681 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not 
seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":682 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(1, 682, __pyx_L1_error) __pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) { __Pyx_INCREF(__pyx_slice__15); __Pyx_GIVEREF(__pyx_slice__15); PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__15); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":683 * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True # <<<<<<<<<<<<<< * else: * result.append(slice(None)) */ __pyx_v_seen_ellipsis = 1; /* "View.MemoryView":681 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ goto __pyx_L7; } /* "View.MemoryView":685 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ /*else*/ { __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__15); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 685, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":686 * else: * result.append(slice(None)) * have_slices = True # <<<<<<<<<<<<<< * else: * if not isinstance(item, slice) and not PyIndex_Check(item): */ __pyx_v_have_slices = 1; /* "View.MemoryView":680 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ goto __pyx_L6; } /* "View.MemoryView":688 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ /*else*/ { __pyx_t_2 = PySlice_Check(__pyx_v_item); __pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0); __pyx_t_1 = __pyx_t_10; __pyx_L9_bool_binop_done:; if (unlikely(__pyx_t_1)) { /* "View.MemoryView":689 * else: * if not isinstance(item, slice) and not PyIndex_Check(item): * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<< * * have_slices = have_slices or isinstance(item, slice) */ __pyx_t_7 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 689, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 689, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_Raise(__pyx_t_11, 0, 0, 0); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __PYX_ERR(1, 689, __pyx_L1_error) /* "View.MemoryView":688 * have_slices = True * else: * if not 
isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ } /* "View.MemoryView":691 * raise TypeError("Cannot index with type '%s'" % type(item)) * * have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<< * result.append(item) * */ __pyx_t_10 = (__pyx_v_have_slices != 0); if (!__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = PySlice_Check(__pyx_v_item); __pyx_t_2 = (__pyx_t_10 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_have_slices = __pyx_t_1; /* "View.MemoryView":692 * * have_slices = have_slices or isinstance(item, slice) * result.append(item) # <<<<<<<<<<<<<< * * nslices = ndim - len(result) */ __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 692, __pyx_L1_error) } __pyx_L6:; /* "View.MemoryView":679 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":694 * result.append(item) * * nslices = ndim - len(result) # <<<<<<<<<<<<<< * if nslices: * result.extend([slice(None)] * nslices) */ __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 694, __pyx_L1_error) __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5); /* "View.MemoryView":695 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ __pyx_t_1 = (__pyx_v_nslices != 0); if (__pyx_t_1) { /* "View.MemoryView":696 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 
0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 696, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) { __Pyx_INCREF(__pyx_slice__15); __Pyx_GIVEREF(__pyx_slice__15); PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__15); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 696, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":695 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ } /* "View.MemoryView":698 * result.extend([slice(None)] * nslices) * * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): */ __Pyx_XDECREF(__pyx_r); if (!__pyx_v_have_slices) { } else { __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L14_bool_binop_done; } __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; __pyx_L14_bool_binop_done:; __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4); __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_r = ((PyObject*)__pyx_t_11); __pyx_t_11 = 0; goto __pyx_L0; /* "View.MemoryView":666 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_tup); __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_idx); __Pyx_XDECREF(__pyx_v_item); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":700 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assert_direct_dimensions", 0); /* "View.MemoryView":701 * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") */ __pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim); for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { __pyx_t_1 = __pyx_t_3; __pyx_v_suboffset = (__pyx_t_1[0]); /* "View.MemoryView":702 * cdef 
assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ __pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":703 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 703, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 703, __pyx_L1_error) /* "View.MemoryView":702 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ } } /* "View.MemoryView":700 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":710 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) { int __pyx_v_new_ndim; int __pyx_v_suboffset_dim; int __pyx_v_dim; __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; __Pyx_memviewslice *__pyx_v_p_src; struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0; __Pyx_memviewslice *__pyx_v_p_dst; int *__pyx_v_p_suboffset_dim; Py_ssize_t __pyx_v_start; Py_ssize_t __pyx_v_stop; Py_ssize_t __pyx_v_step; int __pyx_v_have_start; int __pyx_v_have_stop; int __pyx_v_have_step; PyObject *__pyx_v_index = NULL; struct __pyx_memoryview_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; struct __pyx_memoryview_obj *__pyx_t_4; char *__pyx_t_5; int __pyx_t_6; Py_ssize_t __pyx_t_7; PyObject *(*__pyx_t_8)(PyObject *); PyObject *__pyx_t_9 = NULL; Py_ssize_t __pyx_t_10; int __pyx_t_11; Py_ssize_t __pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memview_slice", 0); /* "View.MemoryView":711 * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<< * cdef bint negative_step * cdef __Pyx_memviewslice src, dst */ __pyx_v_new_ndim = 0; __pyx_v_suboffset_dim = -1; /* "View.MemoryView":718 * * * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<< * * cdef _memoryviewslice memviewsliceobj */ (void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst)))); /* "View.MemoryView":722 * cdef _memoryviewslice memviewsliceobj * * assert memview.view.ndim > 0 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if 
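/* `assert memview.view.ndim > 0` compiles to the guarded check that
 * follows: it disappears entirely when CYTHON_WITHOUT_ASSERTIONS is
 * defined and is skipped at runtime under python -O (Py_OptimizeFlag),
 * matching the semantics of a Python-level assert statement. */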
(unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(1, 722, __pyx_L1_error) } } #endif /* "View.MemoryView":724 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":725 * * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview # <<<<<<<<<<<<<< * p_src = &memviewsliceobj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 725, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":726 * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, &src) */ __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice); /* "View.MemoryView":724 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ goto __pyx_L3; } /* "View.MemoryView":728 * p_src = &memviewsliceobj.from_slice * else: * slice_copy(memview, &src) # <<<<<<<<<<<<<< * p_src = &src * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src)); /* "View.MemoryView":729 * else: * slice_copy(memview, &src) * p_src = &src # <<<<<<<<<<<<<< * * */ __pyx_v_p_src = (&__pyx_v_src); } __pyx_L3:; /* "View.MemoryView":735 * * * dst.memview = p_src.memview # <<<<<<<<<<<<<< * dst.data = p_src.data * */ __pyx_t_4 = __pyx_v_p_src->memview; __pyx_v_dst.memview = __pyx_t_4; /* "View.MemoryView":736 * * dst.memview = p_src.memview * dst.data = p_src.data # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_v_p_src->data; __pyx_v_dst.data = __pyx_t_5; /* "View.MemoryView":741 * * * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<< * cdef int *p_suboffset_dim = &suboffset_dim * cdef Py_ssize_t start, stop, step */ __pyx_v_p_dst = (&__pyx_v_dst); /* "View.MemoryView":742 * * cdef __Pyx_memviewslice *p_dst = &dst * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<< * cdef Py_ssize_t start, stop, step * cdef bint have_start, have_stop, have_step */ __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim); /* "View.MemoryView":746 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ __pyx_t_6 = 0; if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) { __pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0; __pyx_t_8 = NULL; } else { __pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 746, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_8)) { if (likely(PyList_CheckExact(__pyx_t_3))) { if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) 
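/* The enumerate(indices) loop above is expanded into three paths: direct
 * PyList_GET_ITEM / PyTuple_GET_ITEM indexing when
 * CYTHON_ASSUME_SAFE_MACROS permits borrowed references, the
 * PySequence_ITEM fallback in the #else branch below, and a generic
 * tp_iternext path for arbitrary iterables that clears only
 * StopIteration. _unellipsify's enumerate loop earlier in the file uses
 * the same pattern. */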
#else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } else { if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } } else { __pyx_t_9 = __pyx_t_8(__pyx_t_3); if (unlikely(!__pyx_t_9)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 746, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_9); } __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9); __pyx_t_9 = 0; __pyx_v_dim = __pyx_t_6; __pyx_t_6 = (__pyx_t_6 + 1); /* "View.MemoryView":747 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ __pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0); if (__pyx_t_2) { /* "View.MemoryView":751 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, * index, 0, 0, # start, stop, step # <<<<<<<<<<<<<< * 0, 0, 0, # have_{start,stop,step} * False) */ __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 751, __pyx_L1_error) /* "View.MemoryView":748 * for dim, index in enumerate(indices): * if PyIndex_Check(index): * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 748, __pyx_L1_error) /* "View.MemoryView":747 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ goto __pyx_L6; } /* "View.MemoryView":754 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ __pyx_t_2 = (__pyx_v_index == Py_None); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":755 * False) * elif index is None: * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<< * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 */ (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1; /* "View.MemoryView":756 * elif index is None: * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<< * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 */ (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0; /* "View.MemoryView":757 * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<< * new_ndim += 1 * else: */ (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L; /* "View.MemoryView":758 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 # <<<<<<<<<<<<<< * else: * start = index.start or 0 */ __pyx_v_new_ndim = 
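/* An index of None inserts a new axis, much like numpy.newaxis: the
 * branch above gives it extent 1, stride 0 and suboffset -1, and
 * new_ndim is then incremented so subsequent dimensions land one slot
 * further along in the destination slice. */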
(__pyx_v_new_ndim + 1); /* "View.MemoryView":754 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ goto __pyx_L6; } /* "View.MemoryView":760 * new_ndim += 1 * else: * start = index.start or 0 # <<<<<<<<<<<<<< * stop = index.stop or 0 * step = index.step or 0 */ /*else*/ { __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 760, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 760, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 760, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L7_bool_binop_done; } __pyx_t_10 = 0; __pyx_L7_bool_binop_done:; __pyx_v_start = __pyx_t_10; /* "View.MemoryView":761 * else: * start = index.start or 0 * stop = index.stop or 0 # <<<<<<<<<<<<<< * step = index.step or 0 * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 761, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 761, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 761, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = 0; __pyx_L9_bool_binop_done:; __pyx_v_stop = __pyx_t_10; /* "View.MemoryView":762 * start = index.start or 0 * stop = index.stop or 0 * step = index.step or 0 # <<<<<<<<<<<<<< * * have_start = index.start is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 762, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 762, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 762, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = 0; __pyx_L11_bool_binop_done:; __pyx_v_step = __pyx_t_10; /* "View.MemoryView":764 * step = index.step or 0 * * have_start = index.start is not None # <<<<<<<<<<<<<< * have_stop = index.stop is not None * have_step = index.step is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 764, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_start = __pyx_t_1; /* "View.MemoryView":765 * * have_start = index.start is not None * have_stop = index.stop is not None # <<<<<<<<<<<<<< * have_step = index.step is not None * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 765, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_stop = __pyx_t_1; /* "View.MemoryView":766 * have_start = index.start is not None * 
have_stop = index.stop is not None * have_step = index.step is not None # <<<<<<<<<<<<<< * * slice_memviewslice( */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 766, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_step = __pyx_t_1; /* "View.MemoryView":768 * have_step = index.step is not None * * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 768, __pyx_L1_error) /* "View.MemoryView":774 * have_start, have_stop, have_step, * True) * new_ndim += 1 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); } __pyx_L6:; /* "View.MemoryView":746 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":776 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":777 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":778 * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<< * memviewsliceobj.to_dtype_func, * memview.dtype_is_object) */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 778, __pyx_L1_error) } /* "View.MemoryView":779 * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<< * memview.dtype_is_object) * else: */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 779, __pyx_L1_error) } /* "View.MemoryView":777 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 777, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 777, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":776 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ } /* 
"View.MemoryView":782 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ /*else*/ { __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":783 * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 782, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); /* "View.MemoryView":782 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 782, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":710 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj); __Pyx_XDECREF(__pyx_v_index); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":807 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) { Py_ssize_t __pyx_v_new_shape; int __pyx_v_negative_step; int __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":827 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":829 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ __pyx_t_1 = ((__pyx_v_start < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":830 * * if start < 0: * start += shape # <<<<<<<<<<<<<< * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":829 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ } /* "View.MemoryView":831 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ __pyx_t_1 = (0 <= __pyx_v_start); if (__pyx_t_1) { __pyx_t_1 = (__pyx_v_start < __pyx_v_shape); } __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":832 * start += shape * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<< * else: 
* */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 832, __pyx_L1_error) /* "View.MemoryView":831 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ } /* "View.MemoryView":827 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ goto __pyx_L3; } /* "View.MemoryView":835 * else: * * negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<< * * if have_step and step == 0: */ /*else*/ { __pyx_t_1 = ((__pyx_v_have_step != 0) != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L6_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step < 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L6_bool_binop_done:; __pyx_v_negative_step = __pyx_t_2; /* "View.MemoryView":837 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ __pyx_t_1 = (__pyx_v_have_step != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L9_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step == 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L9_bool_binop_done:; if (__pyx_t_2) { /* "View.MemoryView":838 * * if have_step and step == 0: * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 838, __pyx_L1_error) /* "View.MemoryView":837 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ } /* "View.MemoryView":841 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ __pyx_t_2 = (__pyx_v_have_start != 0); if (__pyx_t_2) { /* "View.MemoryView":842 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":843 * if have_start: * if start < 0: * start += shape # <<<<<<<<<<<<<< * if start < 0: * start = 0 */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":844 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":845 * start += shape * if start < 0: * start = 0 # <<<<<<<<<<<<<< * elif start >= shape: * if negative_step: */ __pyx_v_start = 0; /* "View.MemoryView":844 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ } /* "View.MemoryView":842 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ goto __pyx_L12; } /* "View.MemoryView":846 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":847 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":848 * elif start >= shape: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = shape */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":847 * start = 0 * elif start >= shape: 
* if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L14; } /* "View.MemoryView":850 * start = shape - 1 * else: * start = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ /*else*/ { __pyx_v_start = __pyx_v_shape; } __pyx_L14:; /* "View.MemoryView":846 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ } __pyx_L12:; /* "View.MemoryView":841 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ goto __pyx_L11; } /* "View.MemoryView":852 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":853 * else: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = 0 */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":852 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L15; } /* "View.MemoryView":855 * start = shape - 1 * else: * start = 0 # <<<<<<<<<<<<<< * * if have_stop: */ /*else*/ { __pyx_v_start = 0; } __pyx_L15:; } __pyx_L11:; /* "View.MemoryView":857 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ __pyx_t_2 = (__pyx_v_have_stop != 0); if (__pyx_t_2) { /* "View.MemoryView":858 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":859 * if have_stop: * if stop < 0: * stop += shape # <<<<<<<<<<<<<< * if stop < 0: * stop = 0 */ __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape); /* "View.MemoryView":860 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":861 * stop += shape * if stop < 0: * stop = 0 # <<<<<<<<<<<<<< * elif stop > shape: * stop = shape */ __pyx_v_stop = 0; /* "View.MemoryView":860 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ } /* "View.MemoryView":858 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ goto __pyx_L17; } /* "View.MemoryView":862 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":863 * stop = 0 * elif stop > shape: * stop = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ __pyx_v_stop = __pyx_v_shape; /* "View.MemoryView":862 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ } __pyx_L17:; /* "View.MemoryView":857 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ goto __pyx_L16; } /* "View.MemoryView":865 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":866 * else: * if negative_step: * stop = -1 # <<<<<<<<<<<<<< * else: * stop = shape */ __pyx_v_stop = -1L; /* "View.MemoryView":865 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ goto __pyx_L19; } /* "View.MemoryView":868 * stop = -1 * else: * stop = shape # <<<<<<<<<<<<<< * * if not have_step: */ /*else*/ { __pyx_v_stop = __pyx_v_shape; } __pyx_L19:; } __pyx_L16:; /* "View.MemoryView":870 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0); 
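/*
 * Editor's note: the statements above normalise `start` and `stop` the way
 * Python slicing does; negative indices wrap once by `shape`, then the
 * result is clamped into range (an omitted bound defaults to the
 * step-direction extreme, e.g. `stop = -1` for a negative step).  The
 * generated code below then defaults `step` to 1 and computes the slice
 * extent.  A minimal stand-alone sketch of that extent computation follows;
 * `slice_extent` is an illustrative name, not part of this file, and the
 * block is excluded from compilation.
 */
#if 0
static Py_ssize_t slice_extent(Py_ssize_t start, Py_ssize_t stop,
                               Py_ssize_t step)
{
    Py_ssize_t n = (stop - start) / step;  /* truncating C division,
                                              as under cython.cdivision */
    if ((stop - start) - step * n)         /* nonzero remainder: round up */
        n += 1;
    return (n < 0) ? 0 : n;                /* empty slices clamp to zero */
}
/* slice_extent(0, 5, 2)   == 3   -> indices 0, 2, 4
   slice_extent(4, -1, -1) == 5   -> indices 4, 3, 2, 1, 0 */
#endif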
if (__pyx_t_2) { /* "View.MemoryView":871 * * if not have_step: * step = 1 # <<<<<<<<<<<<<< * * */ __pyx_v_step = 1; /* "View.MemoryView":870 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ } /* "View.MemoryView":875 * * with cython.cdivision(True): * new_shape = (stop - start) // step # <<<<<<<<<<<<<< * * if (stop - start) - step * new_shape: */ __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step); /* "View.MemoryView":877 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0); if (__pyx_t_2) { /* "View.MemoryView":878 * * if (stop - start) - step * new_shape: * new_shape += 1 # <<<<<<<<<<<<<< * * if new_shape < 0: */ __pyx_v_new_shape = (__pyx_v_new_shape + 1); /* "View.MemoryView":877 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ } /* "View.MemoryView":880 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":881 * * if new_shape < 0: * new_shape = 0 # <<<<<<<<<<<<<< * * */ __pyx_v_new_shape = 0; /* "View.MemoryView":880 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ } /* "View.MemoryView":884 * * * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<< * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset */ (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step); /* "View.MemoryView":885 * * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<< * dst.suboffsets[new_ndim] = suboffset * */ (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape; /* "View.MemoryView":886 * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<< * * */ (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset; } __pyx_L3:; /* "View.MemoryView":889 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ __pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":890 * * if suboffset_dim[0] < 0: * dst.data += start * stride # <<<<<<<<<<<<<< * else: * dst.suboffsets[suboffset_dim[0]] += start * stride */ __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride)); /* "View.MemoryView":889 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ goto __pyx_L23; } /* "View.MemoryView":892 * dst.data += start * stride * else: * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<< * * if suboffset >= 0: */ /*else*/ { __pyx_t_3 = (__pyx_v_suboffset_dim[0]); (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride)); } __pyx_L23:; /* "View.MemoryView":894 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":895 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":896 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] 
+ suboffset * else: */ __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":897 * if not is_slice: * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<< * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " */ __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset); /* "View.MemoryView":896 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ goto __pyx_L26; } /* "View.MemoryView":899 * dst.data = (<char **> dst.data)[0] + suboffset * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<< * "must be indexed and not sliced", dim) * else: */ /*else*/ { /* "View.MemoryView":900 * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " * "must be indexed and not sliced", dim) # <<<<<<<<<<<<<< * else: * suboffset_dim[0] = new_ndim */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 899, __pyx_L1_error) } __pyx_L26:; /* "View.MemoryView":895 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ goto __pyx_L25; } /* "View.MemoryView":902 * "must be indexed and not sliced", dim) * else: * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<< * * return 0 */ /*else*/ { (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim; } __pyx_L25:; /* "View.MemoryView":894 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ } /* "View.MemoryView":904 * suboffset_dim[0] = new_ndim * * return 0 # <<<<<<<<<<<<<< * * */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":807 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":910 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) { Py_ssize_t __pyx_v_shape; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_suboffset; Py_ssize_t __pyx_v_itemsize; char *__pyx_v_resultp; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("pybuffer_index", 0); /* "View.MemoryView":912 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<< * cdef Py_ssize_t itemsize = view.itemsize * cdef char *resultp */ __pyx_v_suboffset = -1L; /* "View.MemoryView":913 * Py_ssize_t dim) except 
NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<< * cdef char *resultp * */ __pyx_t_1 = __pyx_v_view->itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":916 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":917 * * if view.ndim == 0: * shape = view.len / itemsize # <<<<<<<<<<<<<< * stride = itemsize * else: */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 917, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 917, __pyx_L1_error) } __pyx_v_shape = (__pyx_v_view->len / __pyx_v_itemsize); /* "View.MemoryView":918 * if view.ndim == 0: * shape = view.len / itemsize * stride = itemsize # <<<<<<<<<<<<<< * else: * shape = view.shape[dim] */ __pyx_v_stride = __pyx_v_itemsize; /* "View.MemoryView":916 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ goto __pyx_L3; } /* "View.MemoryView":920 * stride = itemsize * else: * shape = view.shape[dim] # <<<<<<<<<<<<<< * stride = view.strides[dim] * if view.suboffsets != NULL: */ /*else*/ { __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]); /* "View.MemoryView":921 * else: * shape = view.shape[dim] * stride = view.strides[dim] # <<<<<<<<<<<<<< * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] */ __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]); /* "View.MemoryView":922 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":923 * stride = view.strides[dim] * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<< * * if index < 0: */ __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]); /* "View.MemoryView":922 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ } } __pyx_L3:; /* "View.MemoryView":925 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":926 * * if index < 0: * index += view.shape[dim] # <<<<<<<<<<<<<< * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) */ __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim])); /* "View.MemoryView":927 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":928 * index += view.shape[dim] * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * if index >= shape: */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); 
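/* The surrounding statements are Cython's expansion of
 * `raise IndexError("Out of bounds on buffer access (axis %d)" % dim)`:
 * box `dim` as a Python int, %-format it into the message, call
 * IndexError with the result and raise it, releasing each temporary as
 * soon as it has been consumed. */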
if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 928, __pyx_L1_error) /* "View.MemoryView":927 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":925 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ } /* "View.MemoryView":930 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":931 * * if index >= shape: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * resultp = bufp + index * stride */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 931, __pyx_L1_error) /* "View.MemoryView":930 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":933 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * resultp = bufp + index * stride # <<<<<<<<<<<<<< * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset */ __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride)); /* "View.MemoryView":934 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":935 * resultp = bufp + index * stride * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<< * * return resultp */ __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset); /* "View.MemoryView":934 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ } /* "View.MemoryView":937 * resultp = (<char **> resultp)[0] + suboffset * * return resultp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_resultp; goto __pyx_L0; /* "View.MemoryView":910 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename); 
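/* pybuffer_index is declared `except NULL`: on any failure the traceback
 * recorded above is all the cleanup needed and NULL is returned, so
 * callers only have to NULL-check.  On success the element address is
 * simply bufp + index * stride, followed for indirect (PIL-style,
 * PEP 3118) buffers by one extra hop:
 * resultp = ((char **)resultp)[0] + suboffset. */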
__pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":943 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) { int __pyx_v_ndim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; int __pyx_v_i; int __pyx_v_j; int __pyx_r; int __pyx_t_1; Py_ssize_t *__pyx_t_2; long __pyx_t_3; long __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":944 * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<< * * cdef Py_ssize_t *shape = memslice.shape */ __pyx_t_1 = __pyx_v_memslice->memview->view.ndim; __pyx_v_ndim = __pyx_t_1; /* "View.MemoryView":946 * cdef int ndim = memslice.memview.view.ndim * * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<< * cdef Py_ssize_t *strides = memslice.strides * */ __pyx_t_2 = __pyx_v_memslice->shape; __pyx_v_shape = __pyx_t_2; /* "View.MemoryView":947 * * cdef Py_ssize_t *shape = memslice.shape * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_v_memslice->strides; __pyx_v_strides = __pyx_t_2; /* "View.MemoryView":951 * * cdef int i, j * for i in range(ndim / 2): # <<<<<<<<<<<<<< * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] */ __pyx_t_3 = (__pyx_v_ndim / 2); __pyx_t_4 = __pyx_t_3; for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":952 * cdef int i, j * for i in range(ndim / 2): * j = ndim - 1 - i # <<<<<<<<<<<<<< * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] */ __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i); /* "View.MemoryView":953 * for i in range(ndim / 2): * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<< * shape[i], shape[j] = shape[j], shape[i] * */ __pyx_t_5 = (__pyx_v_strides[__pyx_v_j]); __pyx_t_6 = (__pyx_v_strides[__pyx_v_i]); (__pyx_v_strides[__pyx_v_i]) = __pyx_t_5; (__pyx_v_strides[__pyx_v_j]) = __pyx_t_6; /* "View.MemoryView":954 * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<< * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: */ __pyx_t_6 = (__pyx_v_shape[__pyx_v_j]); __pyx_t_5 = (__pyx_v_shape[__pyx_v_i]); (__pyx_v_shape[__pyx_v_i]) = __pyx_t_6; (__pyx_v_shape[__pyx_v_j]) = __pyx_t_5; /* "View.MemoryView":956 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0); if (!__pyx_t_8) { } else { __pyx_t_7 = __pyx_t_8; goto __pyx_L6_bool_binop_done; } __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0); __pyx_t_7 = __pyx_t_8; __pyx_L6_bool_binop_done:; if (__pyx_t_7) { /* "View.MemoryView":957 * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<< * * return 1 */ __pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, 
((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 957, __pyx_L1_error) /* "View.MemoryView":956 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ } } /* "View.MemoryView":959 * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * * return 1 # <<<<<<<<<<<<<< * * */ __pyx_r = 1; goto __pyx_L0; /* "View.MemoryView":943 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":976 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* Python wrapper */ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":977 * * def __dealloc__(self): * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1); /* "View.MemoryView":976 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":979 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":980 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":981 * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: * return self.to_object_func(itemp) # <<<<<<<<<<<<<< * else: * return memoryview.convert_item_to_object(self, itemp) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) 
__PYX_ERR(1, 981, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":980 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ } /* "View.MemoryView":983 * return self.to_object_func(itemp) * else: * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 983, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; } /* "View.MemoryView":979 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":985 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":986 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":987 * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<< * else: * memoryview.assign_item_from_object(self, itemp, value) */ __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 987, __pyx_L1_error) /* "View.MemoryView":986 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ goto __pyx_L3; } /* "View.MemoryView":989 * self.to_dtype_func(itemp, value) * else: * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<< * * @property */ /*else*/ { __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 989, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L3:; /* "View.MemoryView":985 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; 
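/* Note: _memoryviewslice carries optional `to_object_func` /
 * `to_dtype_func` pointers, installed by memoryview_fromslice further
 * below.  When they are NULL, item conversion in both
 * convert_item_to_object and assign_item_from_object falls back to the
 * generic struct-format path of the base memoryview class, as in the
 * else-branch above. */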
__pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":992 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":993 * @property * def base(self): * return self.from_object # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->from_object); __pyx_r = __pyx_v_self->from_object; goto __pyx_L0; /* "View.MemoryView":992 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ 
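/* Pickling support is deliberately absent: because the extension type has
 * a non-trivial __cinit__, both __reduce_cython__ here and
 * __setstate_cython__ below do nothing but raise TypeError. */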
__pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":999 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_TypeInfo *__pyx_t_4; Py_buffer __pyx_t_5; Py_ssize_t *__pyx_t_6; Py_ssize_t *__pyx_t_7; Py_ssize_t *__pyx_t_8; Py_ssize_t __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_fromslice", 0); /* "View.MemoryView":1007 * cdef 
_memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ __pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0); if (__pyx_t_1) { /* "View.MemoryView":1008 * * if <PyObject *> memviewslice.memview == Py_None: * return None # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; /* "View.MemoryView":1007 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ } /* "View.MemoryView":1013 * * * result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<< * * result.from_slice = memviewslice */ __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None); __Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1015 * result = _memoryviewslice(None, 0, dtype_is_object) * * result.from_slice = memviewslice # <<<<<<<<<<<<<< * __PYX_INC_MEMVIEW(&memviewslice, 1) * */ __pyx_v_result->from_slice = __pyx_v_memviewslice; /* "View.MemoryView":1016 * * result.from_slice = memviewslice * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<< * * result.from_object = (<memoryview> memviewslice.memview).base */ __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1); /* "View.MemoryView":1018 * __PYX_INC_MEMVIEW(&memviewslice, 1) * * result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<< * result.typeinfo = memviewslice.memview.typeinfo * */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1018, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __Pyx_GOTREF(__pyx_v_result->from_object); __Pyx_DECREF(__pyx_v_result->from_object); __pyx_v_result->from_object = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":1019 * * result.from_object = (<memoryview> memviewslice.memview).base * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<< * * result.view = memviewslice.memview.view */ __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo; __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4; /* "View.MemoryView":1021 * result.typeinfo = memviewslice.memview.typeinfo * * result.view = memviewslice.memview.view # <<<<<<<<<<<<<< * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim */ __pyx_t_5 = __pyx_v_memviewslice.memview->view; __pyx_v_result->__pyx_base.view = __pyx_t_5; /* "View.MemoryView":1022 * * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<< * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None */ __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data); /* "View.MemoryView":1023 * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data 
* result.view.ndim = ndim # <<<<<<<<<<<<<< * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim; /* "View.MemoryView":1024 * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None; /* "View.MemoryView":1025 * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: */ Py_INCREF(Py_None); /* "View.MemoryView":1027 * Py_INCREF(Py_None) * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< * result.flags = PyBUF_RECORDS * else: */ __pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0); if (__pyx_t_1) { /* "View.MemoryView":1028 * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<< * else: * result.flags = PyBUF_RECORDS_RO */ __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS; /* "View.MemoryView":1027 * Py_INCREF(Py_None) * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< * result.flags = PyBUF_RECORDS * else: */ goto __pyx_L4; } /* "View.MemoryView":1030 * result.flags = PyBUF_RECORDS * else: * result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<< * * result.view.shape = <Py_ssize_t *> result.from_slice.shape */ /*else*/ { __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO; } __pyx_L4:; /* "View.MemoryView":1032 * result.flags = PyBUF_RECORDS_RO * * result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<< * result.view.strides = <Py_ssize_t *> result.from_slice.strides * */ __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape); /* "View.MemoryView":1033 * * result.view.shape = <Py_ssize_t *> result.from_slice.shape * result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<< * * */ __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides); /* "View.MemoryView":1036 * * * result.view.suboffsets = NULL # <<<<<<<<<<<<<< * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: */ __pyx_v_result->__pyx_base.view.suboffsets = NULL; /* "View.MemoryView":1037 * * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets */ __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_v_suboffset = (__pyx_t_6[0]); /* "View.MemoryView":1038 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ __pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1039 * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<< * break * */ __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets); /* "View.MemoryView":1040 * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> 
result.from_slice.suboffsets * break # <<<<<<<<<<<<<< * * result.view.len = result.view.itemsize */ goto __pyx_L6_break; /* "View.MemoryView":1038 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ } } __pyx_L6_break:; /* "View.MemoryView":1042 * break * * result.view.len = result.view.itemsize # <<<<<<<<<<<<<< * for length in result.view.shape[:ndim]: * result.view.len *= length */ __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; /* "View.MemoryView":1043 * * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<< * result.view.len *= length * */ __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1043, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1044 * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: * result.view.len *= length # <<<<<<<<<<<<<< * * result.to_object_func = to_object_func */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1044, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1044, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1044, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; } /* "View.MemoryView":1046 * result.view.len *= length * * result.to_object_func = to_object_func # <<<<<<<<<<<<<< * result.to_dtype_func = to_dtype_func * */ __pyx_v_result->to_object_func = __pyx_v_to_object_func; /* "View.MemoryView":1047 * * result.to_object_func = to_object_func * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; /* "View.MemoryView":1049 * result.to_dtype_func = to_dtype_func * * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_get_slice_from_memoryview') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":999 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1052 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj */ static __Pyx_memviewslice 
*__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) { struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0; __Pyx_memviewslice *__pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_slice_from_memview", 0); /* "View.MemoryView":1055 * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1056 * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): * obj = memview # <<<<<<<<<<<<<< * return &obj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1056, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":1057 * if isinstance(memview, _memoryviewslice): * obj = memview * return &obj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, mslice) */ __pyx_r = (&__pyx_v_obj->from_slice); goto __pyx_L0; /* "View.MemoryView":1055 * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ } /* "View.MemoryView":1059 * return &obj.from_slice * else: * slice_copy(memview, mslice) # <<<<<<<<<<<<<< * return mslice * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); /* "View.MemoryView":1060 * else: * slice_copy(memview, mslice) * return mslice # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_slice_copy') */ __pyx_r = __pyx_v_mslice; goto __pyx_L0; } /* "View.MemoryView":1052 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_obj); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1063 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { int __pyx_v_dim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; Py_ssize_t *__pyx_v_suboffsets; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; Py_ssize_t __pyx_t_5; __Pyx_RefNannySetupContext("slice_copy", 0); /* "View.MemoryView":1067 * cdef (Py_ssize_t*) shape, strides, suboffsets * * shape = memview.view.shape # <<<<<<<<<<<<<< * strides = memview.view.strides * suboffsets = memview.view.suboffsets */ __pyx_t_1 = __pyx_v_memview->view.shape; __pyx_v_shape = __pyx_t_1; /* "View.MemoryView":1068 * * shape = memview.view.shape * strides = 
memview.view.strides # <<<<<<<<<<<<<< * suboffsets = memview.view.suboffsets * */ __pyx_t_1 = __pyx_v_memview->view.strides; __pyx_v_strides = __pyx_t_1; /* "View.MemoryView":1069 * shape = memview.view.shape * strides = memview.view.strides * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< * * dst.memview = <__pyx_memoryview *> memview */ __pyx_t_1 = __pyx_v_memview->view.suboffsets; __pyx_v_suboffsets = __pyx_t_1; /* "View.MemoryView":1071 * suboffsets = memview.view.suboffsets * * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< * dst.data = <char *> memview.view.buf * */ __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); /* "View.MemoryView":1072 * * dst.memview = <__pyx_memoryview *> memview * dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<< * * for dim in range(memview.view.ndim): */ __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf); /* "View.MemoryView":1074 * dst.data = <char *> memview.view.buf * * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] */ __pyx_t_2 = __pyx_v_memview->view.ndim; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_dim = __pyx_t_4; /* "View.MemoryView":1075 * * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<< * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 */ (__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]); /* "View.MemoryView":1076 * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<< * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 * */ (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]); /* "View.MemoryView":1077 * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object') */ if ((__pyx_v_suboffsets != 0)) { __pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]); } else { __pyx_t_5 = -1L; } (__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5; } /* "View.MemoryView":1063 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1080 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) { __Pyx_memviewslice __pyx_v_memviewslice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_copy", 0); /* "View.MemoryView":1083 * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<< * return memoryview_copy_from_slice(memview, &memviewslice) * */ __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice)); /* "View.MemoryView":1084 * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object_from_slice') */ 
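/* memoryview_copy snapshots the Python-level view into a stack-allocated
 * __Pyx_memviewslice via slice_copy above (missing suboffsets become -1,
 * i.e. "direct" in PEP 3118 terms), then rebuilds a fresh Python object
 * from it below; memoryview_copy_from_slice preserves any dtype
 * conversion functions when the source is a _memoryviewslice. */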
__Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1084, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":1080 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1087 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. */ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) { PyObject *(*__pyx_v_to_object_func)(char *); int (*__pyx_v_to_dtype_func)(char *, PyObject *); PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *(*__pyx_t_3)(char *); int (*__pyx_t_4)(char *, PyObject *); PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0); /* "View.MemoryView":1094 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1095 * * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<< * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: */ __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func; __pyx_v_to_object_func = __pyx_t_3; /* "View.MemoryView":1096 * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<< * else: * to_object_func = NULL */ __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func; __pyx_v_to_dtype_func = __pyx_t_4; /* "View.MemoryView":1094 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ goto __pyx_L3; } /* "View.MemoryView":1098 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: * to_object_func = NULL # <<<<<<<<<<<<<< * to_dtype_func = NULL * */ /*else*/ { __pyx_v_to_object_func = NULL; /* "View.MemoryView":1099 * else: * to_object_func = NULL * to_dtype_func = NULL # <<<<<<<<<<<<<< * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, */ __pyx_v_to_dtype_func = NULL; } __pyx_L3:; /* "View.MemoryView":1101 * to_dtype_func = NULL * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<< * to_object_func, to_dtype_func, * 
memview.dtype_is_object) */ __Pyx_XDECREF(__pyx_r); /* "View.MemoryView":1103 * return memoryview_fromslice(memviewslice[0], memview.view.ndim, * to_object_func, to_dtype_func, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1101, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":1087 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1109 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) { Py_ssize_t __pyx_r; int __pyx_t_1; /* "View.MemoryView":1110 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ __pyx_t_1 = ((__pyx_v_arg < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1111 * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: * return -arg # <<<<<<<<<<<<<< * else: * return arg */ __pyx_r = (-__pyx_v_arg); goto __pyx_L0; /* "View.MemoryView":1110 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ } /* "View.MemoryView":1113 * return -arg * else: * return arg # <<<<<<<<<<<<<< * * @cname('__pyx_get_best_slice_order') */ /*else*/ { __pyx_r = __pyx_v_arg; goto __pyx_L0; } /* "View.MemoryView":1109 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1116 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. 
*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_c_stride; Py_ssize_t __pyx_v_f_stride; char __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; /* "View.MemoryView":1121 * """ * cdef int i * cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<< * cdef Py_ssize_t f_stride = 0 * */ __pyx_v_c_stride = 0; /* "View.MemoryView":1122 * cdef int i * cdef Py_ssize_t c_stride = 0 * cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_f_stride = 0; /* "View.MemoryView":1124 * cdef Py_ssize_t f_stride = 0 * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1125 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1126 * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1127 * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * for i in range(ndim): */ goto __pyx_L4_break; /* "View.MemoryView":1125 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ } } __pyx_L4_break:; /* "View.MemoryView":1129 * break * * for i in range(ndim): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] */ __pyx_t_1 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_1; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1130 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1131 * for i in range(ndim): * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1132 * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): */ goto __pyx_L7_break; /* "View.MemoryView":1130 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ } } __pyx_L7_break:; /* "View.MemoryView":1134 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ __pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1135 * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): * return 'C' # <<<<<<<<<<<<<< * else: * return 'F' */ __pyx_r = 'C'; goto __pyx_L0; /* "View.MemoryView":1134 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ } /* "View.MemoryView":1137 * return 'C' * else: * return 'F' # <<<<<<<<<<<<<< * * @cython.cdivision(True) */ /*else*/ { __pyx_r = 'F'; goto __pyx_L0; } /* "View.MemoryView":1116 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given 
slice. */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1140 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent; Py_ssize_t __pyx_v_dst_extent; Py_ssize_t __pyx_v_src_stride; Py_ssize_t __pyx_v_dst_stride; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; /* "View.MemoryView":1147 * * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] */ __pyx_v_src_extent = (__pyx_v_src_shape[0]); /* "View.MemoryView":1148 * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] */ __pyx_v_dst_extent = (__pyx_v_dst_shape[0]); /* "View.MemoryView":1149 * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_stride = dst_strides[0] * */ __pyx_v_src_stride = (__pyx_v_src_strides[0]); /* "View.MemoryView":1150 * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_dst_stride = (__pyx_v_dst_strides[0]); /* "View.MemoryView":1152 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1153 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ __pyx_t_2 = ((__pyx_v_src_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } /* "View.MemoryView":1154 * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize * dst_extent) * else: */ __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize); if (__pyx_t_2) { __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride)); } __pyx_t_3 = (__pyx_t_2 != 0); __pyx_t_1 = __pyx_t_3; __pyx_L5_bool_binop_done:; /* "View.MemoryView":1153 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ if (__pyx_t_1) { /* "View.MemoryView":1155 * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<< * else: * for i 
in range(dst_extent): */ (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent))); /* "View.MemoryView":1153 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ goto __pyx_L4; } /* "View.MemoryView":1157 * memcpy(dst_data, src_data, itemsize * dst_extent) * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize) * src_data += src_stride */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1158 * else: * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<< * src_data += src_stride * dst_data += dst_stride */ (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize)); /* "View.MemoryView":1159 * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * else: */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1160 * memcpy(dst_data, src_data, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L4:; /* "View.MemoryView":1152 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ goto __pyx_L3; } /* "View.MemoryView":1162 * dst_data += dst_stride * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * _copy_strided_to_strided(src_data, src_strides + 1, * dst_data, dst_strides + 1, */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1163 * else: * for i in range(dst_extent): * _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<< * dst_data, dst_strides + 1, * src_shape + 1, dst_shape + 1, */ _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize); /* "View.MemoryView":1167 * src_shape + 1, dst_shape + 1, * ndim - 1, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1168 * ndim - 1, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L3:; /* "View.MemoryView":1140 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ /* function exit code */ } /* "View.MemoryView":1170 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) { /* "View.MemoryView":1173 * __Pyx_memviewslice *dst, * int ndim, size_t 
itemsize) nogil: * _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<< * src.shape, dst.shape, ndim, itemsize) * */ _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1170 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ /* function exit code */ } /* "View.MemoryView":1177 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef Py_ssize_t shape, size = src.memview.view.itemsize */ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) { Py_ssize_t __pyx_v_shape; Py_ssize_t __pyx_v_size; Py_ssize_t __pyx_r; Py_ssize_t __pyx_t_1; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; /* "View.MemoryView":1179 * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: * "Return the size of the memory occupied by the slice in number of bytes" * cdef Py_ssize_t shape, size = src.memview.view.itemsize # <<<<<<<<<<<<<< * * for shape in src.shape[:ndim]: */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_size = __pyx_t_1; /* "View.MemoryView":1181 * cdef Py_ssize_t shape, size = src.memview.view.itemsize * * for shape in src.shape[:ndim]: # <<<<<<<<<<<<<< * size *= shape * */ __pyx_t_3 = (__pyx_v_src->shape + __pyx_v_ndim); for (__pyx_t_4 = __pyx_v_src->shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { __pyx_t_2 = __pyx_t_4; __pyx_v_shape = (__pyx_t_2[0]); /* "View.MemoryView":1182 * * for shape in src.shape[:ndim]: * size *= shape # <<<<<<<<<<<<<< * * return size */ __pyx_v_size = (__pyx_v_size * __pyx_v_shape); } /* "View.MemoryView":1184 * size *= shape * * return size # <<<<<<<<<<<<<< * * @cname('__pyx_fill_contig_strides_array') */ __pyx_r = __pyx_v_size; goto __pyx_L0; /* "View.MemoryView":1177 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef Py_ssize_t shape, size = src.memview.view.itemsize */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1187 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) { int __pyx_v_idx; Py_ssize_t __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; /* "View.MemoryView":1196 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ __pyx_t_1 = ((__pyx_v_order == 'F') != 0); if (__pyx_t_1) { /* "View.MemoryView":1197 * * if order == 'F': * for idx in range(ndim): # <<<<<<<<<<<<<< * strides[idx] = stride * stride *= shape[idx] */ __pyx_t_2 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_idx = __pyx_t_4; /* "View.MemoryView":1198 * if order == 'F': * for idx in range(ndim): * strides[idx] = 
stride # <<<<<<<<<<<<<< * stride *= shape[idx] * else: */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1199 * for idx in range(ndim): * strides[idx] = stride * stride *= shape[idx] # <<<<<<<<<<<<<< * else: * for idx in range(ndim - 1, -1, -1): */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } /* "View.MemoryView":1196 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ goto __pyx_L3; } /* "View.MemoryView":1201 * stride *= shape[idx] * else: * for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * strides[idx] = stride * stride *= shape[idx] */ /*else*/ { for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) { __pyx_v_idx = __pyx_t_2; /* "View.MemoryView":1202 * else: * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride # <<<<<<<<<<<<<< * stride *= shape[idx] * */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1203 * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride * stride *= shape[idx] # <<<<<<<<<<<<<< * * return stride */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } } __pyx_L3:; /* "View.MemoryView":1205 * stride *= shape[idx] * * return stride # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_data_to_temp') */ __pyx_r = __pyx_v_stride; goto __pyx_L0; /* "View.MemoryView":1187 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1208 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) { int __pyx_v_i; void *__pyx_v_result; size_t __pyx_v_itemsize; size_t __pyx_v_size; void *__pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; struct __pyx_memoryview_obj *__pyx_t_4; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":1219 * cdef void *result * * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef size_t size = slice_get_size(src, ndim) * */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1220 * * cdef size_t itemsize = src.memview.view.itemsize * cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<< * * result = malloc(size) */ __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim); /* "View.MemoryView":1222 * cdef size_t size = slice_get_size(src, ndim) * * result = malloc(size) # <<<<<<<<<<<<<< * if not result: * _err(MemoryError, NULL) */ __pyx_v_result = malloc(__pyx_v_size); /* "View.MemoryView":1223 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1224 * result = malloc(size) * if not result: * _err(MemoryError, NULL) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 1224, __pyx_L1_error) /* "View.MemoryView":1223 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ } /* 
"View.MemoryView":1227 * * * tmpslice.data = <char *> result # <<<<<<<<<<<<<< * tmpslice.memview = src.memview * for i in range(ndim): */ __pyx_v_tmpslice->data = ((char *)__pyx_v_result); /* "View.MemoryView":1228 * * tmpslice.data = <char *> result * tmpslice.memview = src.memview # <<<<<<<<<<<<<< * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] */ __pyx_t_4 = __pyx_v_src->memview; __pyx_v_tmpslice->memview = __pyx_t_4; /* "View.MemoryView":1229 * tmpslice.data = <char *> result * tmpslice.memview = src.memview * for i in range(ndim): # <<<<<<<<<<<<<< * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 */ __pyx_t_3 = __pyx_v_ndim; __pyx_t_5 = __pyx_t_3; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1230 * tmpslice.memview = src.memview * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<< * tmpslice.suboffsets[i] = -1 * */ (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]); /* "View.MemoryView":1231 * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, */ (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1233 * tmpslice.suboffsets[i] = -1 * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<< * ndim, order) * */ (void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order)); /* "View.MemoryView":1237 * * * for i in range(ndim): # <<<<<<<<<<<<<< * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 */ __pyx_t_3 = __pyx_v_ndim; __pyx_t_5 = __pyx_t_3; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1238 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ __pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1239 * for i in range(ndim): * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 # <<<<<<<<<<<<<< * * if slice_is_contig(src[0], order, ndim): */ (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0; /* "View.MemoryView":1238 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ } } /* "View.MemoryView":1241 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ __pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1242 * * if slice_is_contig(src[0], order, ndim): * memcpy(result, src.data, size) # <<<<<<<<<<<<<< * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) */ (void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size)); /* "View.MemoryView":1241 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ goto __pyx_L9; } /* "View.MemoryView":1244 * memcpy(result, src.data, size) * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<< * * return result */ /*else*/ { copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize); } __pyx_L9:; /* "View.MemoryView":1246 * copy_strided_to_strided(src, tmpslice, ndim, itemsize) * * return result # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_result; 
goto __pyx_L0; /* "View.MemoryView":1208 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = NULL; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1251 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_extents", 0); /* "View.MemoryView":1254 * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % * (i, extent1, extent2)) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err_dim') */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_3 = 0; /* "View.MemoryView":1253 * cdef int _err_extents(int i, Py_ssize_t extent1, * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<< * (i, extent1, extent2)) * */ __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1253, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1253, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(1, 1253, __pyx_L1_error) /* "View.MemoryView":1251 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, 
__pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1257 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_dim", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1258 * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: * raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err') */ __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_INCREF(__pyx_v_error); __pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_2) ? 
__Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 1258, __pyx_L1_error) /* "View.MemoryView":1257 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1261 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1262 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ __pyx_t_1 = ((__pyx_v_msg != NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":1263 * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: * raise error(msg.decode('ascii')) # <<<<<<<<<<<<<< * else: * raise error */ __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1263, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_error); __pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_2 = (__pyx_t_5) ? 
__Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1263, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 1263, __pyx_L1_error) /* "View.MemoryView":1262 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ } /* "View.MemoryView":1265 * raise error(msg.decode('ascii')) * else: * raise error # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_contents') */ /*else*/ { __Pyx_Raise(__pyx_v_error, 0, 0, 0); __PYX_ERR(1, 1265, __pyx_L1_error) } /* "View.MemoryView":1261 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1268 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) { void *__pyx_v_tmpdata; size_t __pyx_v_itemsize; int __pyx_v_i; char __pyx_v_order; int __pyx_v_broadcasting; int __pyx_v_direct_copy; __Pyx_memviewslice __pyx_v_tmp; int __pyx_v_ndim; int __pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; void *__pyx_t_7; int __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":1276 * Check for overlapping memory and verify the shapes. 
* """ * cdef void *tmpdata = NULL # <<<<<<<<<<<<<< * cdef size_t itemsize = src.memview.view.itemsize * cdef int i */ __pyx_v_tmpdata = NULL; /* "View.MemoryView":1277 * """ * cdef void *tmpdata = NULL * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef int i * cdef char order = get_best_order(&src, src_ndim) */ __pyx_t_1 = __pyx_v_src.memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1279 * cdef size_t itemsize = src.memview.view.itemsize * cdef int i * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<< * cdef bint broadcasting = False * cdef bint direct_copy = False */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim); /* "View.MemoryView":1280 * cdef int i * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False # <<<<<<<<<<<<<< * cdef bint direct_copy = False * cdef __Pyx_memviewslice tmp */ __pyx_v_broadcasting = 0; /* "View.MemoryView":1281 * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False * cdef bint direct_copy = False # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice tmp * */ __pyx_v_direct_copy = 0; /* "View.MemoryView":1284 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1285 * * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<< * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim); /* "View.MemoryView":1284 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ goto __pyx_L3; } /* "View.MemoryView":1286 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1287 * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<< * * cdef int ndim = max(src_ndim, dst_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim); /* "View.MemoryView":1286 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ } __pyx_L3:; /* "View.MemoryView":1289 * broadcast_leading(&dst, dst_ndim, src_ndim) * * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_3 = __pyx_v_dst_ndim; __pyx_t_4 = __pyx_v_src_ndim; if (((__pyx_t_3 > __pyx_t_4) != 0)) { __pyx_t_5 = __pyx_t_3; } else { __pyx_t_5 = __pyx_t_4; } __pyx_v_ndim = __pyx_t_5; /* "View.MemoryView":1291 * cdef int ndim = max(src_ndim, dst_ndim) * * for i in range(ndim): # <<<<<<<<<<<<<< * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: */ __pyx_t_5 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_5; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1292 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) 
!= 0); if (__pyx_t_2) { /* "View.MemoryView":1293 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1294 * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: * broadcasting = True # <<<<<<<<<<<<<< * src.strides[i] = 0 * else: */ __pyx_v_broadcasting = 1; /* "View.MemoryView":1295 * if src.shape[i] == 1: * broadcasting = True * src.strides[i] = 0 # <<<<<<<<<<<<<< * else: * _err_extents(i, dst.shape[i], src.shape[i]) */ (__pyx_v_src.strides[__pyx_v_i]) = 0; /* "View.MemoryView":1293 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ goto __pyx_L7; } /* "View.MemoryView":1297 * src.strides[i] = 0 * else: * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<< * * if src.suboffsets[i] >= 0: */ /*else*/ { __pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1297, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":1292 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ } /* "View.MemoryView":1299 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":1300 * * if src.suboffsets[i] >= 0: * _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<< * * if slices_overlap(&src, &dst, ndim, itemsize): */ __pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1300, __pyx_L1_error) /* "View.MemoryView":1299 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ } } /* "View.MemoryView":1302 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ __pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0); if (__pyx_t_2) { /* "View.MemoryView":1304 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ __pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1305 * * if not slice_is_contig(src, order, ndim): * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<< * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim); /* "View.MemoryView":1304 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ } /* "View.MemoryView":1307 * order = get_best_order(&dst, ndim) * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<< * src = tmp * */ __pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) 
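 /* Overlap handling: when the source and destination slices share memory and
    the source is not contiguous in the chosen order, copy_data_to_temp() above
    copies the source into a freshly malloc()ed contiguous buffer and "src" is
    then redirected at that buffer.  The helper returns NULL on failure with a
    Python exception already set, which the enclosing check turns into a jump
    to the error exit; on success, the temporary buffer (tmpdata) is released
    by the free(tmpdata) calls before each successful return further below. */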
__PYX_ERR(1, 1307, __pyx_L1_error) __pyx_v_tmpdata = __pyx_t_7; /* "View.MemoryView":1308 * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) * src = tmp # <<<<<<<<<<<<<< * * if not broadcasting: */ __pyx_v_src = __pyx_v_tmp; /* "View.MemoryView":1302 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ } /* "View.MemoryView":1310 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1313 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1314 * * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<< * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim); /* "View.MemoryView":1313 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ goto __pyx_L12; } /* "View.MemoryView":1315 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1316 * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<< * * if direct_copy: */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim); /* "View.MemoryView":1315 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ } __pyx_L12:; /* "View.MemoryView":1318 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_2 = (__pyx_v_direct_copy != 0); if (__pyx_t_2) { /* "View.MemoryView":1320 * if direct_copy: * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1321 * * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) */ (void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim))); /* "View.MemoryView":1322 * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * free(tmpdata) * return 0 */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1323 * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, 
dtype_is_object, ndim, True) * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1324 * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * if order == 'F' == get_best_order(&dst, ndim): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1318 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ } /* "View.MemoryView":1310 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1326 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ __pyx_t_2 = (__pyx_v_order == 'F'); if (__pyx_t_2) { __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim)); } __pyx_t_8 = (__pyx_t_2 != 0); if (__pyx_t_8) { /* "View.MemoryView":1329 * * * transpose_memslice(&src) # <<<<<<<<<<<<<< * transpose_memslice(&dst) * */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1329, __pyx_L1_error) /* "View.MemoryView":1330 * * transpose_memslice(&src) * transpose_memslice(&dst) # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1330, __pyx_L1_error) /* "View.MemoryView":1326 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1332 * transpose_memslice(&dst) * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1333 * * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * */ copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1334 * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * free(tmpdata) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1336 * refcount_copying(&dst, dtype_is_object, ndim, True) * * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1337 * * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_broadcast_leading') */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1268 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1340 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ static void 
__pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) { int __pyx_v_i; int __pyx_v_offset; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1344 * int ndim_other) nogil: * cdef int i * cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim); /* "View.MemoryView":1346 * cdef int offset = ndim_other - ndim * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1347 * * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<< * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] */ (__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]); /* "View.MemoryView":1348 * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<< * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * */ (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1349 * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<< * * for i in range(offset): */ (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]); } /* "View.MemoryView":1351 * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * * for i in range(offset): # <<<<<<<<<<<<<< * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] */ __pyx_t_1 = __pyx_v_offset; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1352 * * for i in range(offset): * mslice.shape[i] = 1 # <<<<<<<<<<<<<< * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 */ (__pyx_v_mslice->shape[__pyx_v_i]) = 1; /* "View.MemoryView":1353 * for i in range(offset): * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<< * mslice.suboffsets[i] = -1 * */ (__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]); /* "View.MemoryView":1354 * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * */ (__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1340 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ /* function exit code */ } /* "View.MemoryView":1362 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) { int __pyx_t_1; /* "View.MemoryView":1366 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ __pyx_t_1 = (__pyx_v_dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":1367 * * if dtype_is_object: * refcount_objects_in_slice_with_gil(dst.data, dst.shape, # 
<<<<<<<<<<<<<< * dst.strides, ndim, inc) * */ __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1366 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ } /* "View.MemoryView":1362 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ /* function exit code */ } /* "View.MemoryView":1371 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { __Pyx_RefNannyDeclarations #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0); /* "View.MemoryView":1374 * Py_ssize_t *strides, int ndim, * bint inc) with gil: * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_refcount_objects_in_slice') */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1371 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ /* function exit code */ __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } /* "View.MemoryView":1377 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0); /* "View.MemoryView":1381 * cdef Py_ssize_t i * * for i in range(shape[0]): # <<<<<<<<<<<<<< * if ndim == 1: * if inc: */ __pyx_t_1 = (__pyx_v_shape[0]); __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1382 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ __pyx_t_4 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_4) { /* "View.MemoryView":1383 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ __pyx_t_4 = (__pyx_v_inc != 0); if (__pyx_t_4) { /* "View.MemoryView":1384 * if ndim == 1: * if inc: * Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * Py_DECREF((<PyObject **> data)[0]) */ Py_INCREF((((PyObject **)__pyx_v_data)[0])); /* "View.MemoryView":1383 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ goto __pyx_L6; } /* "View.MemoryView":1386 * 
Py_INCREF((<PyObject **> data)[0]) * else: * Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, */ /*else*/ { Py_DECREF((((PyObject **)__pyx_v_data)[0])); } __pyx_L6:; /* "View.MemoryView":1382 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ goto __pyx_L5; } /* "View.MemoryView":1388 * Py_DECREF((<PyObject **> data)[0]) * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, inc) * */ /*else*/ { /* "View.MemoryView":1389 * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, * ndim - 1, inc) # <<<<<<<<<<<<<< * * data += strides[0] */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc); } __pyx_L5:; /* "View.MemoryView":1391 * ndim - 1, inc) * * data += strides[0] # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); } /* "View.MemoryView":1377 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1397 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) { /* "View.MemoryView":1400 * size_t itemsize, void *item, * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1401 * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<< * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) */ __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1403 * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1397 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ /* function exit code */ } /* "View.MemoryView":1407 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_extent; int __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t 
/* "View.MemoryView":1397
 *
 * @cname('__pyx_memoryview_slice_assign_scalar')
 * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim,             # <<<<<<<<<<<<<<
 *                               size_t itemsize, void *item,
 *                               bint dtype_is_object) nogil:
 */

static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) {

  /* "View.MemoryView":1400
 *                               size_t itemsize, void *item,
 *                               bint dtype_is_object) nogil:
 *     refcount_copying(dst, dtype_is_object, ndim, False)             # <<<<<<<<<<<<<<
 *     _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim,
 *                          itemsize, item)
 */
  __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0);

  /* "View.MemoryView":1401
 *                               bint dtype_is_object) nogil:
 *     refcount_copying(dst, dtype_is_object, ndim, False)
 *     _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim,             # <<<<<<<<<<<<<<
 *                          itemsize, item)
 *     refcount_copying(dst, dtype_is_object, ndim, True)
 */
  __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item);

  /* "View.MemoryView":1403
 *     _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim,
 *                          itemsize, item)
 *     refcount_copying(dst, dtype_is_object, ndim, True)             # <<<<<<<<<<<<<<
 *
 *
 */
  __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1);

  /* "View.MemoryView":1397
 *
 * @cname('__pyx_memoryview_slice_assign_scalar')
 * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim,             # <<<<<<<<<<<<<<
 *                               size_t itemsize, void *item,
 *                               bint dtype_is_object) nogil:
 */

  /* function exit code */
}

/* "View.MemoryView":1407
 *
 * @cname('__pyx_memoryview__slice_assign_scalar')
 * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape,             # <<<<<<<<<<<<<<
 *                                Py_ssize_t *strides, int ndim,
 *                                size_t itemsize, void *item) nogil:
 */

static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) {
  CYTHON_UNUSED Py_ssize_t __pyx_v_i;
  Py_ssize_t __pyx_v_stride;
  Py_ssize_t __pyx_v_extent;
  int __pyx_t_1;
  Py_ssize_t __pyx_t_2;
  Py_ssize_t __pyx_t_3;
  Py_ssize_t __pyx_t_4;

  /* "View.MemoryView":1411
 *                                size_t itemsize, void *item) nogil:
 *     cdef Py_ssize_t i
 *     cdef Py_ssize_t stride = strides[0]             # <<<<<<<<<<<<<<
 *     cdef Py_ssize_t extent = shape[0]
 *
 */
  __pyx_v_stride = (__pyx_v_strides[0]);

  /* "View.MemoryView":1412
 *     cdef Py_ssize_t i
 *     cdef Py_ssize_t stride = strides[0]
 *     cdef Py_ssize_t extent = shape[0]             # <<<<<<<<<<<<<<
 *
 *     if ndim == 1:
 */
  __pyx_v_extent = (__pyx_v_shape[0]);

  /* "View.MemoryView":1414
 *     cdef Py_ssize_t extent = shape[0]
 *
 *     if ndim == 1:             # <<<<<<<<<<<<<<
 *         for i in range(extent):
 *             memcpy(data, item, itemsize)
 */
  __pyx_t_1 = ((__pyx_v_ndim == 1) != 0);
  if (__pyx_t_1) {

    /* "View.MemoryView":1415
 *
 *     if ndim == 1:
 *         for i in range(extent):             # <<<<<<<<<<<<<<
 *             memcpy(data, item, itemsize)
 *             data += stride
 */
    __pyx_t_2 = __pyx_v_extent;
    __pyx_t_3 = __pyx_t_2;
    for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
      __pyx_v_i = __pyx_t_4;

      /* "View.MemoryView":1416
 *     if ndim == 1:
 *         for i in range(extent):
 *             memcpy(data, item, itemsize)             # <<<<<<<<<<<<<<
 *             data += stride
 *     else:
 */
      (void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize));

      /* "View.MemoryView":1417
 *         for i in range(extent):
 *             memcpy(data, item, itemsize)
 *             data += stride             # <<<<<<<<<<<<<<
 *     else:
 *         for i in range(extent):
 */
      __pyx_v_data = (__pyx_v_data + __pyx_v_stride);
    }

    /* "View.MemoryView":1414
 *     cdef Py_ssize_t extent = shape[0]
 *
 *     if ndim == 1:             # <<<<<<<<<<<<<<
 *         for i in range(extent):
 *             memcpy(data, item, itemsize)
 */
    goto __pyx_L3;
  }

  /* "View.MemoryView":1419
 *             data += stride
 *     else:
 *         for i in range(extent):             # <<<<<<<<<<<<<<
 *             _slice_assign_scalar(data, shape + 1, strides + 1,
 *                                  ndim - 1, itemsize, item)
 */
  /*else*/ {
    __pyx_t_2 = __pyx_v_extent;
    __pyx_t_3 = __pyx_t_2;
    for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
      __pyx_v_i = __pyx_t_4;

      /* "View.MemoryView":1420
 *     else:
 *         for i in range(extent):
 *             _slice_assign_scalar(data, shape + 1, strides + 1,             # <<<<<<<<<<<<<<
 *                                  ndim - 1, itemsize, item)
 *             data += stride
 */
      __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item);

      /* "View.MemoryView":1422
 *             _slice_assign_scalar(data, shape + 1, strides + 1,
 *                                  ndim - 1, itemsize, item)
 *             data += stride             # <<<<<<<<<<<<<<
 *
 *
 */
      __pyx_v_data = (__pyx_v_data + __pyx_v_stride);
    }
  }
  __pyx_L3:;

  /* "View.MemoryView":1407
 *
 * @cname('__pyx_memoryview__slice_assign_scalar')
 * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape,             # <<<<<<<<<<<<<<
 *                                Py_ssize_t *strides, int ndim,
 *                                size_t itemsize, void *item) nogil:
 */

  /* function exit code */
}

/* "(tree fragment)":1
 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_PickleError
 *     cdef object __pyx_result
 */

/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v___pyx_type = 0;
  long __pyx_v___pyx_checksum;
  PyObject *__pyx_v___pyx_state = 0;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
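  /*
   * Argument unpacking for this generated wrapper: it accepts
   * (__pyx_type, __pyx_checksum, __pyx_state) either positionally or by
   * keyword, mirroring the def signature quoted in the tree fragment above.
   */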
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0);
  {
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0};
    PyObject* values[3] = {0,0,0};
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      switch (pos_args) {
        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        CYTHON_FALLTHROUGH;
        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        CYTHON_FALLTHROUGH;
        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      switch (pos_args) {
        case  0:
        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
        CYTHON_FALLTHROUGH;
        case  1:
        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  2:
        if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error)
        }
      }
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(1, 1, __pyx_L3_error)
      }
    } else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
      values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
    }
    __pyx_v___pyx_type = values[0];
    __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error)
    __pyx_v___pyx_state = values[2];
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error)
  __pyx_L3_error:;
  __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_v___pyx_PickleError = 0;
  PyObject *__pyx_v___pyx_result = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0);

  /* "(tree fragment)":4
 *     cdef object __pyx_PickleError
 *     cdef object __pyx_result
 *     if __pyx_checksum != 0xb068931:             # <<<<<<<<<<<<<<
 *         from pickle import PickleError as __pyx_PickleError
 *         raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
 */
  __pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0);
  if (__pyx_t_1) {

    /* "(tree fragment)":5
 *     cdef object __pyx_result
 *     if __pyx_checksum != 0xb068931:
 *         from pickle import PickleError as __pyx_PickleError             # <<<<<<<<<<<<<<
 *         raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
 *     __pyx_result = Enum.__new__(__pyx_type)
 */
    __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_INCREF(__pyx_n_s_PickleError);
    __Pyx_GIVEREF(__pyx_n_s_PickleError);
    PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError);
    __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_INCREF(__pyx_t_2);
    __pyx_v___pyx_PickleError = __pyx_t_2;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;

    /* "(tree fragment)":6
 *     if __pyx_checksum != 0xb068931:
 *         from pickle import PickleError as __pyx_PickleError
 *         raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)             # <<<<<<<<<<<<<<
 *     __pyx_result = Enum.__new__(__pyx_type)
 *     if __pyx_state is not None:
 */
    __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_INCREF(__pyx_v___pyx_PickleError);
    __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL;
    if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
      __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
      if (likely(__pyx_t_5)) {
        PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
        __Pyx_INCREF(__pyx_t_5);
        __Pyx_INCREF(function);
        __Pyx_DECREF_SET(__pyx_t_2, function);
      }
    }
    __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4);
    __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_Raise(__pyx_t_3, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __PYX_ERR(1, 6, __pyx_L1_error)

    /* "(tree fragment)":4
 *     cdef object __pyx_PickleError
 *     cdef object __pyx_result
 *     if __pyx_checksum != 0xb068931:             # <<<<<<<<<<<<<<
 *         from pickle import PickleError as __pyx_PickleError
 *         raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
 */
  }

  /* "(tree fragment)":7
 *         from pickle import PickleError as __pyx_PickleError
 *         raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
 *     __pyx_result = Enum.__new__(__pyx_type)             # <<<<<<<<<<<<<<
 *     if __pyx_state is not None:
 *         __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
 */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_4 = NULL;
  if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
    __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2);
    if (likely(__pyx_t_4)) {
      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
      __Pyx_INCREF(__pyx_t_4);
      __Pyx_INCREF(function);
      __Pyx_DECREF_SET(__pyx_t_2, function);
    }
  }
  __pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type);
  __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
  if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_v___pyx_result = __pyx_t_3;
  __pyx_t_3 = 0;

  /* "(tree fragment)":8
 *         raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
 *     __pyx_result = Enum.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
 *     return __pyx_result
 */
  __pyx_t_1 = (__pyx_v___pyx_state != Py_None);
  __pyx_t_6 = (__pyx_t_1 != 0);
  if (__pyx_t_6) {

    /* "(tree fragment)":9
 *     __pyx_result = Enum.__new__(__pyx_type)
 *     if __pyx_state is not None:
 *         __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)             # <<<<<<<<<<<<<<
 *     return __pyx_result
 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
 */
    if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error)
    __pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;

    /* "(tree fragment)":8
 *         raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
 *     __pyx_result = Enum.__new__(__pyx_type)
 *     if __pyx_state is not None:             # <<<<<<<<<<<<<<
 *         __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
 *     return __pyx_result
 */
  }

  /* "(tree fragment)":10
 *     if __pyx_state is not None:
 *         __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
 *     return __pyx_result             # <<<<<<<<<<<<<<
 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
 *     __pyx_result.name = __pyx_state[0]
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v___pyx_result);
  __pyx_r = __pyx_v___pyx_result;
  goto __pyx_L0;

  /* "(tree fragment)":1
 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state):             # <<<<<<<<<<<<<<
 *     cdef object __pyx_PickleError
 *     cdef object __pyx_result
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v___pyx_PickleError);
  __Pyx_XDECREF(__pyx_v___pyx_result);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
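/*
 * Note: __pyx_unpickle_Enum__set_state below restores the Enum instance:
 * state[0] becomes .name, and an optional state[1] dict, when present, is
 * merged into the instance __dict__ via its update() method.
 */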
/* "(tree fragment)":11
 *         __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_result.name = __pyx_state[0]
 *     if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
 */

static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_t_2;
  Py_ssize_t __pyx_t_3;
  int __pyx_t_4;
  int __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0);

  /* "(tree fragment)":12
 *     return __pyx_result
 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
 *     __pyx_result.name = __pyx_state[0]             # <<<<<<<<<<<<<<
 *     if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
 *         __pyx_result.__dict__.update(__pyx_state[1])
 */
  if (unlikely(__pyx_v___pyx_state == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
    __PYX_ERR(1, 12, __pyx_L1_error)
  }
  __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_1);
  __Pyx_GOTREF(__pyx_v___pyx_result->name);
  __Pyx_DECREF(__pyx_v___pyx_result->name);
  __pyx_v___pyx_result->name = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "(tree fragment)":13
 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
 *     __pyx_result.name = __pyx_state[0]
 *     if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):             # <<<<<<<<<<<<<<
 *         __pyx_result.__dict__.update(__pyx_state[1])
 */
  if (unlikely(__pyx_v___pyx_state == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
    __PYX_ERR(1, 13, __pyx_L1_error)
  }
  __pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
  __pyx_t_4 = ((__pyx_t_3 > 1) != 0);
  if (__pyx_t_4) {
  } else {
    __pyx_t_2 = __pyx_t_4;
    goto __pyx_L4_bool_binop_done;
  }
  __pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
  __pyx_t_5 = (__pyx_t_4 != 0);
  __pyx_t_2 = __pyx_t_5;
  __pyx_L4_bool_binop_done:;
  if (__pyx_t_2) {

    /* "(tree fragment)":14
 *     __pyx_result.name = __pyx_state[0]
 *     if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
 *         __pyx_result.__dict__.update(__pyx_state[1])             # <<<<<<<<<<<<<<
 */
    __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_7);
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    if (unlikely(__pyx_v___pyx_state == Py_None)) {
      PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
      __PYX_ERR(1, 14, __pyx_L1_error)
    }
    __pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    __pyx_t_8 = NULL;
    if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) {
      __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7);
      if (likely(__pyx_t_8)) {
        PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
        __Pyx_INCREF(__pyx_t_8);
        __Pyx_INCREF(function);
        __Pyx_DECREF_SET(__pyx_t_7, function);
      }
    }
    __pyx_t_1 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6);
    __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;

    /* "(tree fragment)":13
 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
 *     __pyx_result.name = __pyx_state[0]
 *     if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):             # <<<<<<<<<<<<<<
 *         __pyx_result.__dict__.update(__pyx_state[1])
 */
  }

  /* "(tree fragment)":11
 *         __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
 *     return __pyx_result
 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):             # <<<<<<<<<<<<<<
 *     __pyx_result.name = __pyx_state[0]
 *     if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
 */

  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_7);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static struct __pyx_vtabstruct_array __pyx_vtable_array;

static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) {
  struct __pyx_array_obj *p;
  PyObject *o;
  if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
    o = (*t->tp_alloc)(t, 0);
  } else {
    o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
  }
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_array_obj *)o);
  p->__pyx_vtab = __pyx_vtabptr_array;
  p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None);
  p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None);
  if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad;
  return o;
  bad:
  Py_DECREF(o); o = 0;
  return NULL;
}

static void __pyx_tp_dealloc_array(PyObject *o) {
  struct __pyx_array_obj *p = (struct __pyx_array_obj *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) {
    if (PyObject_CallFinalizerFromDealloc(o)) return;
  }
  #endif
  {
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    ++Py_REFCNT(o);
    __pyx_array___dealloc__(o);
    --Py_REFCNT(o);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->mode);
  Py_CLEAR(p->_format);
  (*Py_TYPE(o)->tp_free)(o);
}

static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) {
  PyObject *r;
  PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0;
  r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
  Py_DECREF(x);
  return r;
}

static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_array___setitem__(o, i, v);
  }
  else {
    PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
    return -1;
  }
}

static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) {
  PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n);
  if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) {
    PyErr_Clear();
    v = __pyx_array___getattr__(o, n);
  }
  return v;
}

static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o);
}

static PyMethodDef __pyx_methods_array[] = {
  {"__getattr__",
(PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_array[] = { {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_array = { __pyx_array___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_array, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_array = { __pyx_array___len__, /*mp_length*/ __pyx_array___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_array = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_array_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_array = { PyVarObject_HEAD_INIT(0, 0) "glove.metrics.accuracy_cython.array", /*tp_name*/ sizeof(struct __pyx_array_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_array, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_array, /*tp_as_sequence*/ &__pyx_tp_as_mapping_array, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ __pyx_tp_getattro_array, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_array, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_array, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_array, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_array, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { struct __pyx_MemviewEnum_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_MemviewEnum_obj *)o); p->name = Py_None; Py_INCREF(Py_None); return o; } static void __pyx_tp_dealloc_Enum(PyObject *o) { struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && 
!_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); Py_CLEAR(p->name); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) { int e; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; if (p->name) { e = (*v)(p->name, a); if (e) return e; } return 0; } static int __pyx_tp_clear_Enum(PyObject *o) { PyObject* tmp; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; tmp = ((PyObject*)p->name); p->name = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } static PyMethodDef __pyx_methods_Enum[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_MemviewEnum = { PyVarObject_HEAD_INIT(0, 0) "glove.metrics.accuracy_cython.Enum", /*tp_name*/ sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_Enum, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_MemviewEnum___repr__, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_Enum, /*tp_traverse*/ __pyx_tp_clear_Enum, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_Enum, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ __pyx_MemviewEnum___init__, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_Enum, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryview_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_memoryview_obj *)o); p->__pyx_vtab = __pyx_vtabptr_memoryview; p->obj = Py_None; Py_INCREF(Py_None); p->_size = Py_None; Py_INCREF(Py_None); p->_array_interface = Py_None; Py_INCREF(Py_None); p->view.obj = NULL; if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_memoryview(PyObject *o) { struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } 
#endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryview___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->obj); Py_CLEAR(p->_size); Py_CLEAR(p->_array_interface); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; if (p->obj) { e = (*v)(p->obj, a); if (e) return e; } if (p->_size) { e = (*v)(p->_size, a); if (e) return e; } if (p->_array_interface) { e = (*v)(p->_array_interface, a); if (e) return e; } if (p->view.obj) { e = (*v)(p->view.obj, a); if (e) return e; } return 0; } static int __pyx_tp_clear_memoryview(PyObject *o) { PyObject* tmp; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; tmp = ((PyObject*)p->obj); p->obj = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_size); p->_size = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_array_interface); p->_array_interface = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); Py_CLEAR(p->view.obj); return 0; } static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_memoryview___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o); } static PyMethodDef __pyx_methods_memoryview[] = { {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0}, {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0}, {"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0}, {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0}, {"__reduce_cython__", 
(PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_memoryview[] = { {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0}, {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0}, {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0}, {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0}, {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0}, {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0}, {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0}, {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0}, {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_memoryview = { __pyx_memoryview___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_memoryview, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_memoryview = { __pyx_memoryview___len__, /*mp_length*/ __pyx_memoryview___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_memoryview = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_memoryview_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_memoryview = { PyVarObject_HEAD_INIT(0, 0) "glove.metrics.accuracy_cython.memoryview", /*tp_name*/ sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_memoryview, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_memoryview___repr__, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/ &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ __pyx_memoryview___str__, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_memoryview, /*tp_traverse*/ __pyx_tp_clear_memoryview, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_memoryview, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_memoryview, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_memoryview, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, 
/*tp_print*/ #endif }; static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice; static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryviewslice_obj *p; PyObject *o = __pyx_tp_new_memoryview(t, a, k); if (unlikely(!o)) return 0; p = ((struct __pyx_memoryviewslice_obj *)o); p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice; p->from_object = Py_None; Py_INCREF(Py_None); p->from_slice.memview = NULL; return o; } static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) { struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryviewslice___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->from_object); PyObject_GC_Track(o); __pyx_tp_dealloc_memoryview(o); } static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e; if (p->from_object) { e = (*v)(p->from_object, a); if (e) return e; } return 0; } static int __pyx_tp_clear__memoryviewslice(PyObject *o) { PyObject* tmp; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; __pyx_tp_clear_memoryview(o); tmp = ((PyObject*)p->from_object); p->from_object = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); __PYX_XDEC_MEMVIEW(&p->from_slice, 1); return 0; } static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o); } static PyMethodDef __pyx_methods__memoryviewslice[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = { {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_memoryviewslice = { PyVarObject_HEAD_INIT(0, 0) "glove.metrics.accuracy_cython._memoryviewslice", /*tp_name*/ sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___repr__, /*tp_repr*/ #else 0, /*tp_repr*/ #endif 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___str__, /*tp_str*/ #else 0, /*tp_str*/ #endif 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ "Internal class for passing memoryview slices to Python", /*tp_doc*/ __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/ 
__pyx_tp_clear__memoryviewslice, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods__memoryviewslice, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets__memoryviewslice, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new__memoryviewslice, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 #if CYTHON_PEP489_MULTI_PHASE_INIT static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ static int __pyx_pymod_exec_accuracy_cython(PyObject* module); /*proto*/ static PyModuleDef_Slot __pyx_moduledef_slots[] = { {Py_mod_create, (void*)__pyx_pymod_create}, {Py_mod_exec, (void*)__pyx_pymod_exec_accuracy_cython}, {0, NULL} }; #endif static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, "accuracy_cython", 0, /* m_doc */ #if CYTHON_PEP489_MULTI_PHASE_INIT 0, /* m_size */ #else -1, /* m_size */ #endif __pyx_methods /* m_methods */, #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_moduledef_slots, /* m_slots */ #else NULL, /* m_reload */ #endif NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif #ifndef CYTHON_SMALL_CODE #if defined(__clang__) #define CYTHON_SMALL_CODE #elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) #define CYTHON_SMALL_CODE __attribute__((cold)) #else #define CYTHON_SMALL_CODE #endif #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1}, {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0}, {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0}, {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1}, {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0}, {&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0}, {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_shape_in_axis_d_d, 
__pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0}, {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0}, {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0}, {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1}, {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0}, {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1}, {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1}, {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1}, {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1}, {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, {&__pyx_n_s_compute_rank_violations, __pyx_k_compute_rank_violations, sizeof(__pyx_k_compute_rank_violations), 0, 0, 1, 1}, {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0}, {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1}, {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1}, {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, {&__pyx_n_s_expected, __pyx_k_expected, sizeof(__pyx_k_expected), 0, 0, 1, 1}, {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1}, {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1}, {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1}, {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, {&__pyx_n_s_glove_metrics_accuracy_cython, __pyx_k_glove_metrics_accuracy_cython, sizeof(__pyx_k_glove_metrics_accuracy_cython), 0, 0, 1, 1}, {&__pyx_kp_s_glove_metrics_accuracy_cython_py, __pyx_k_glove_metrics_accuracy_cython_py, sizeof(__pyx_k_glove_metrics_accuracy_cython_py), 0, 0, 1, 0}, {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0}, {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_input, __pyx_k_input, sizeof(__pyx_k_input), 0, 0, 1, 1}, {&__pyx_n_s_inputs, 
__pyx_k_inputs, sizeof(__pyx_k_inputs), 0, 0, 1, 1}, {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1}, {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0}, {&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1}, {&__pyx_n_s_k, __pyx_k_k, sizeof(__pyx_k_k), 0, 0, 1, 1}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1}, {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1}, {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1}, {&__pyx_n_s_no_components, __pyx_k_no_components, sizeof(__pyx_k_no_components), 0, 0, 1, 1}, {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, {&__pyx_n_s_no_input_vectors, __pyx_k_no_input_vectors, sizeof(__pyx_k_no_input_vectors), 0, 0, 1, 1}, {&__pyx_n_s_no_threads, __pyx_k_no_threads, sizeof(__pyx_k_no_threads), 0, 0, 1, 1}, {&__pyx_n_s_no_wordvec, __pyx_k_no_wordvec, sizeof(__pyx_k_no_wordvec), 0, 0, 1, 1}, {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1}, {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1}, {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1}, {&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1}, {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1}, {&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_rank_violations, __pyx_k_rank_violations, sizeof(__pyx_k_rank_violations), 0, 0, 1, 1}, {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, {&__pyx_n_s_score, __pyx_k_score, sizeof(__pyx_k_score), 0, 0, 1, 1}, {&__pyx_n_s_score_of_expected, __pyx_k_score_of_expected, sizeof(__pyx_k_score_of_expected), 0, 0, 1, 1}, {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, {&__pyx_n_s_skip_word, __pyx_k_skip_word, sizeof(__pyx_k_skip_word), 0, 0, 1, 1}, {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1}, {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1}, {&__pyx_kp_s_strided_and_direct, 
__pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0}, {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0}, {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0}, {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1}, {&__pyx_n_s_violations, __pyx_k_violations, sizeof(__pyx_k_violations), 0, 0, 1, 1}, {&__pyx_n_s_wordvec, __pyx_k_wordvec, sizeof(__pyx_k_wordvec), 0, 0, 1, 1}, {&__pyx_n_s_wordvec_norm, __pyx_k_wordvec_norm, sizeof(__pyx_k_wordvec_norm), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 14, __pyx_L1_error) __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 133, __pyx_L1_error) __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 148, __pyx_L1_error) __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 151, __pyx_L1_error) __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error) __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 404, __pyx_L1_error) __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 613, __pyx_L1_error) __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 832, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "View.MemoryView":133 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple_)) __PYX_ERR(1, 133, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); /* "View.MemoryView":136 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "View.MemoryView":148 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 148, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "View.MemoryView":176 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* "View.MemoryView":192 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); /* "View.MemoryView":418 * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< * * have_slices, index = _unellipsify(index, self.view.ndim) */ __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 418, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); /* "View.MemoryView":495 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 495, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); /* "View.MemoryView":520 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< * * if flags & PyBUF_ND: */ __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 520, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); /* "View.MemoryView":570 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 570, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); /* "View.MemoryView":577 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __pyx_tuple__12 = PyTuple_New(1); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__12); __Pyx_INCREF(__pyx_int_neg_1); __Pyx_GIVEREF(__pyx_int_neg_1); PyTuple_SET_ITEM(__pyx_tuple__12, 0, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_tuple__12); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__13); __Pyx_GIVEREF(__pyx_tuple__13); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__14); __Pyx_GIVEREF(__pyx_tuple__14); /* "View.MemoryView":682 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_slice__15 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__15)) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__15); __Pyx_GIVEREF(__pyx_slice__15); /* "View.MemoryView":703 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(1, 703, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__16); __Pyx_GIVEREF(__pyx_tuple__16); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__17); __Pyx_GIVEREF(__pyx_tuple__17); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__18); __Pyx_GIVEREF(__pyx_tuple__18); /* "glove/metrics/accuracy_cython.pyx":20 * * * def compute_rank_violations(double[:, ::1] wordvec, # <<<<<<<<<<<<<< * double[::1] wordvec_norm, * double[:, ::1] input, */ __pyx_tuple__19 = PyTuple_Pack(17, __pyx_n_s_wordvec, __pyx_n_s_wordvec_norm, __pyx_n_s_input, __pyx_n_s_expected, __pyx_n_s_inputs, __pyx_n_s_rank_violations, __pyx_n_s_no_threads, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_k, __pyx_n_s_no_input_vectors, 
__pyx_n_s_no_wordvec, __pyx_n_s_skip_word, __pyx_n_s_no_components, __pyx_n_s_violations, __pyx_n_s_score_of_expected, __pyx_n_s_score); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(0, 20, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__19); __Pyx_GIVEREF(__pyx_tuple__19); __pyx_codeobj__20 = (PyObject*)__Pyx_PyCode_New(7, 0, 17, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__19, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_glove_metrics_accuracy_cython_py, __pyx_n_s_compute_rank_violations, 20, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__20)) __PYX_ERR(0, 20, __pyx_L1_error) /* "View.MemoryView":286 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_tuple__21 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(1, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__21); __Pyx_GIVEREF(__pyx_tuple__21); /* "View.MemoryView":287 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_tuple__22 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(1, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__22); __Pyx_GIVEREF(__pyx_tuple__22); /* "View.MemoryView":288 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__23 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(1, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__23); __Pyx_GIVEREF(__pyx_tuple__23); /* "View.MemoryView":291 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_tuple__24 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(1, 291, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__24); __Pyx_GIVEREF(__pyx_tuple__24); /* "View.MemoryView":292 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__25 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(1, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__25); __Pyx_GIVEREF(__pyx_tuple__25); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ __pyx_tuple__26 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__26)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__26); __Pyx_GIVEREF(__pyx_tuple__26); __pyx_codeobj__27 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__26, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__27)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static CYTHON_SMALL_CODE int 
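/* __Pyx_InitGlobals (below) interns the module's string table and caches
 * the handful of int objects the generated code needs: 0, 1, -1, and
 * 184977713, the checksum constant consumed by the __pyx_unpickle_Enum
 * machinery above.  On Python 3 the generated preamble #defines
 * PyInt_FromLong to PyLong_FromLong, so the same source serves both
 * major versions. */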
__Pyx_InitGlobals(void) {
  /* InitThreads.init */
  #ifdef WITH_THREAD
  PyEval_InitThreads();
  #endif
  if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error)
  if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
  __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error)
  return 0;
  __pyx_L1_error:;
  return -1;
}

static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/

static int __Pyx_modinit_global_init_code(void) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0);
  /*--- Global init code ---*/
  generic = Py_None; Py_INCREF(Py_None);
  strided = Py_None; Py_INCREF(Py_None);
  indirect = Py_None; Py_INCREF(Py_None);
  contiguous = Py_None; Py_INCREF(Py_None);
  indirect_contiguous = Py_None; Py_INCREF(Py_None);
  __Pyx_RefNannyFinishContext();
  return 0;
}

static int __Pyx_modinit_variable_export_code(void) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0);
  /*--- Variable export code ---*/
  __Pyx_RefNannyFinishContext();
  return 0;
}

static int __Pyx_modinit_function_export_code(void) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0);
  /*--- Function export code ---*/
  __Pyx_RefNannyFinishContext();
  return 0;
}

static int __Pyx_modinit_type_init_code(void) {
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0);
  /*--- Type init code ---*/
  __pyx_vtabptr_array = &__pyx_vtable_array;
  __pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview;
  if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error)
  #if PY_VERSION_HEX < 0x030800B1
  __pyx_type___pyx_array.tp_print = 0;
  #endif
  if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error)
  if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error)
  __pyx_array_type = &__pyx_type___pyx_array;
  if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error)
  #if PY_VERSION_HEX < 0x030800B1
  __pyx_type___pyx_MemviewEnum.tp_print = 0;
  #endif
  if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) {
    __pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr;
  }
  if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error)
  __pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum;
  __pyx_vtabptr_memoryview =
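/* vtable wiring: Cython keeps the C-level method pointers of each cdef
 * class in a static vtable struct; __Pyx_SetVtable stores a pointer to it
 * in the type's tp_dict (via a capsule) so cdef-method calls resolve
 * through a direct function pointer rather than a Python attribute
 * lookup.  The assignments that follow fill the memoryview vtable. */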
&__pyx_vtable_memoryview; __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer; __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice; __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment; __pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar; __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed; __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object; __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object; if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_memoryview.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) __pyx_memoryview_type = &__pyx_type___pyx_memoryview; __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice; __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview; __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object; __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object; __pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type; if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_memoryviewslice.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice; __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_modinit_type_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); /*--- Type import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_variable_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); 
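/* The type-init recipe above is the same for every cdef class: fill the
 * vtable, PyType_Ready() the static type object, zero the tp_print slot
 * on CPython older than 3.8 (where the slot still exists), optionally
 * short-circuit tp_getattro to the generic fast path, publish the vtable,
 * and register pickling support via __Pyx_setup_reduce(). */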
/*--- Variable import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); /*--- Function import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } #ifndef CYTHON_NO_PYINIT_EXPORT #define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC #elif PY_MAJOR_VERSION < 3 #ifdef __cplusplus #define __Pyx_PyMODINIT_FUNC extern "C" void #else #define __Pyx_PyMODINIT_FUNC void #endif #else #ifdef __cplusplus #define __Pyx_PyMODINIT_FUNC extern "C" PyObject * #else #define __Pyx_PyMODINIT_FUNC PyObject * #endif #endif #if PY_MAJOR_VERSION < 3 __Pyx_PyMODINIT_FUNC initaccuracy_cython(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC initaccuracy_cython(void) #else __Pyx_PyMODINIT_FUNC PyInit_accuracy_cython(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC PyInit_accuracy_cython(void) #if CYTHON_PEP489_MULTI_PHASE_INIT { return PyModuleDef_Init(&__pyx_moduledef); } static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { #if PY_VERSION_HEX >= 0x030700A1 static PY_INT64_T main_interpreter_id = -1; PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); if (main_interpreter_id == -1) { main_interpreter_id = current_id; return (unlikely(current_id == -1)) ? -1 : 0; } else if (unlikely(main_interpreter_id != current_id)) #else static PyInterpreterState *main_interpreter = NULL; PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; if (!main_interpreter) { main_interpreter = current_interpreter; } else if (unlikely(main_interpreter != current_interpreter)) #endif { PyErr_SetString( PyExc_ImportError, "Interpreter change detected - this module can only be loaded into one interpreter per process."); return -1; } return 0; } static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { PyObject *value = PyObject_GetAttrString(spec, from_name); int result = 0; if (likely(value)) { if (allow_none || value != Py_None) { result = PyDict_SetItemString(moddict, to_name, value); } Py_DECREF(value); } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); } else { result = -1; } return result; } static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { PyObject *module = NULL, *moddict, *modname; if (__Pyx_check_single_interpreter()) return NULL; if (__pyx_m) return __Pyx_NewRef(__pyx_m); modname = PyObject_GetAttrString(spec, "name"); if (unlikely(!modname)) goto bad; module = PyModule_NewObject(modname); Py_DECREF(modname); if (unlikely(!module)) goto bad; moddict = PyModule_GetDict(module); if (unlikely(!moddict)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; return module; bad: Py_XDECREF(module); return NULL; } static CYTHON_SMALL_CODE int __pyx_pymod_exec_accuracy_cython(PyObject *__pyx_pyinit_module) #endif #endif { PyObject *__pyx_t_1 = NULL; static PyThread_type_lock __pyx_t_2[8]; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations #if 
CYTHON_PEP489_MULTI_PHASE_INIT if (__pyx_m) { if (__pyx_m == __pyx_pyinit_module) return 0; PyErr_SetString(PyExc_RuntimeError, "Module 'accuracy_cython' has already been imported. Re-initialisation is not supported."); return -1; } #elif PY_MAJOR_VERSION >= 3 if (__pyx_m) return __Pyx_NewRef(__pyx_m); #endif #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_accuracy_cython(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pxy_PyFrame_Initialize_Offsets __Pxy_PyFrame_Initialize_Offsets(); #endif __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_AsyncGen_USED if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_m = __pyx_pyinit_module; Py_INCREF(__pyx_m); #else #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("accuracy_cython", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) #endif __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_b); __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_cython_runtime); if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. 
---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_glove__metrics__accuracy_cython) { if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "glove.metrics.accuracy_cython")) { if (unlikely(PyDict_SetItemString(modules, "glove.metrics.accuracy_cython", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Global type/function init code ---*/ (void)__Pyx_modinit_global_init_code(); (void)__Pyx_modinit_variable_export_code(); (void)__Pyx_modinit_function_export_code(); if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error) (void)__Pyx_modinit_type_import_code(); (void)__Pyx_modinit_variable_import_code(); (void)__Pyx_modinit_function_import_code(); /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "glove/metrics/accuracy_cython.pyx":20 * * * def compute_rank_violations(double[:, ::1] wordvec, # <<<<<<<<<<<<<< * double[::1] wordvec_norm, * double[:, ::1] input, */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5glove_7metrics_15accuracy_cython_1compute_rank_violations, NULL, __pyx_n_s_glove_metrics_accuracy_cython); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_compute_rank_violations, __pyx_t_1) < 0) __PYX_ERR(0, 20, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "glove/metrics/accuracy_cython.pyx":1 * #!python # <<<<<<<<<<<<<< * #cython: boundscheck=False, wraparound=False, cdivision=True, initializedcheck=False * */ __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":209 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * def __dealloc__(array self): */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 209, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 209, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_array_type); /* "View.MemoryView":286 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__21, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(generic); __Pyx_DECREF_SET(generic, __pyx_t_1); 
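/* generic/strided/indirect/contiguous/indirect_contiguous are singleton
 * Enum instances created once here at module exec; the memoryview
 * machinery compares against them by identity to describe how each
 * dimension of a slice is laid out. */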
__Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":287 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__22, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(strided); __Pyx_DECREF_SET(strided, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":288 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__23, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(indirect); __Pyx_DECREF_SET(indirect, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":291 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__24, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 291, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(contiguous); __Pyx_DECREF_SET(contiguous, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":292 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__25, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(indirect_contiguous); __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":316 * * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<< * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ * PyThread_allocate_lock(), */ __pyx_memoryview_thread_locks_used = 0; /* "View.MemoryView":317 * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<< * PyThread_allocate_lock(), * PyThread_allocate_lock(), */ __pyx_t_2[0] = PyThread_allocate_lock(); __pyx_t_2[1] = PyThread_allocate_lock(); __pyx_t_2[2] = PyThread_allocate_lock(); __pyx_t_2[3] = PyThread_allocate_lock(); __pyx_t_2[4] = PyThread_allocate_lock(); __pyx_t_2[5] = PyThread_allocate_lock(); __pyx_t_2[6] = PyThread_allocate_lock(); __pyx_t_2[7] = PyThread_allocate_lock(); memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_2, sizeof(__pyx_memoryview_thread_locks[0]) * (8)); /* "View.MemoryView":549 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 549, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 549, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_memoryview_type); /* 
"View.MemoryView":995 * return self.from_object * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 995, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 995, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_memoryviewslice_type); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init glove.metrics.accuracy_cython", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_CLEAR(__pyx_m); } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init glove.metrics.accuracy_cython"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if CYTHON_PEP489_MULTI_PHASE_INIT return (__pyx_m != NULL) ? 
0 : -1; #elif PY_MAJOR_VERSION >= 3 return __pyx_m; #else return; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule(modname); if (!m) goto end; p = PyObject_GetAttrString(m, "RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* PyObjectGetAttrStr */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? "" : "s", num_found); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 
1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* MemviewSliceInit */ static int __Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference) { __Pyx_RefNannyDeclarations int i, retval=-1; Py_buffer *buf = &memview->view; __Pyx_RefNannySetupContext("init_memviewslice", 0); if (unlikely(memviewslice->memview || memviewslice->data)) { PyErr_SetString(PyExc_ValueError, "memviewslice is already initialized!"); goto fail; } if (buf->strides) { for (i = 0; i < ndim; i++) { memviewslice->strides[i] = buf->strides[i]; } } else { Py_ssize_t stride = buf->itemsize; for (i = ndim - 1; i >= 0; i--) { memviewslice->strides[i] = stride; stride *= buf->shape[i]; } } for (i = 0; i < ndim; i++) { memviewslice->shape[i] = buf->shape[i]; if (buf->suboffsets) { memviewslice->suboffsets[i] = buf->suboffsets[i]; } else { memviewslice->suboffsets[i] = -1; } } memviewslice->memview = memview; memviewslice->data = (char *)buf->buf; if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { Py_INCREF(memview); } retval = 0; goto no_fail; fail: memviewslice->memview = 0; memviewslice->data = 0; retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } #ifndef Py_NO_RETURN #define Py_NO_RETURN #endif static void __pyx_fatalerror(const char *fmt, ...) 
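/* __pyx_fatalerror (below) formats its varargs into a fixed 200-byte
 * buffer with vsnprintf and hands the result to Py_FatalError, which
 * aborts the process.  It is reserved for memoryview acquisition-count
 * corruption, where no Python exception can be raised safely. */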
Py_NO_RETURN { va_list vargs; char msg[200]; #ifdef HAVE_STDARG_PROTOTYPES va_start(vargs, fmt); #else va_start(vargs); #endif vsnprintf(msg, 200, fmt, vargs); va_end(vargs); Py_FatalError(msg); } static CYTHON_INLINE int __pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)++; PyThread_release_lock(lock); return result; } static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)--; PyThread_release_lock(lock); return result; } static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int first_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (unlikely(!memview || (PyObject *) memview == Py_None)) return; if (unlikely(__pyx_get_slice_count(memview) < 0)) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); first_time = __pyx_add_acquisition_count(memview) == 0; if (unlikely(first_time)) { if (have_gil) { Py_INCREF((PyObject *) memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_INCREF((PyObject *) memview); PyGILState_Release(_gilstate); } } } static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int last_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (unlikely(!memview || (PyObject *) memview == Py_None)) { memslice->memview = NULL; return; } if (unlikely(__pyx_get_slice_count(memview) <= 0)) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); last_time = __pyx_sub_acquisition_count(memview) == 1; memslice->data = NULL; if (unlikely(last_time)) { if (have_gil) { Py_CLEAR(memslice->memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_CLEAR(memslice->memview); PyGILState_Release(_gilstate); } } else { memslice->memview = NULL; } } /* ArgTypeTest */ static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } else if (exact) { #if PY_MAJOR_VERSION == 2 if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(__Pyx_TypeCheck(obj, type))) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyErrFetchRestore */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; 
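/* Swap first, decref last: the old exception triple is stashed above and
 * released only after the assignments below install the new one, so any
 * destructor triggered by Py_XDECREF can never observe a half-updated
 * thread state. */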
tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto 
bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* PyCFunctionFastCall */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { PyCFunctionObject *func = (PyCFunctionObject*)func_obj; PyCFunction meth = PyCFunction_GET_FUNCTION(func); PyObject *self = PyCFunction_GET_SELF(func); int flags = PyCFunction_GET_FLAGS(func); assert(PyCFunction_Check(func)); assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); assert(nargs >= 0); assert(nargs == 0 || args != NULL); /* _PyCFunction_FastCallDict() must not be called with an exception set, because it may clear it (directly or indirectly) and so the caller loses its exception */ assert(!PyErr_Occurred()); if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); } else { return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); } } #endif /* PyFunctionFastCall */ #if CYTHON_FAST_PYCALL static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, PyObject *globals) { PyFrameObject *f; PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject **fastlocals; Py_ssize_t i; PyObject *result; assert(globals != NULL); /* XXX Perhaps we should create a specialized PyFrame_New() that doesn't take locals, but does take builtins without sanity checking them. */ assert(tstate != NULL); f = PyFrame_New(tstate, co, globals, NULL); if (f == NULL) { return NULL; } fastlocals = __Pyx_PyFrame_GetLocalsplus(f); for (i = 0; i < na; i++) { Py_INCREF(*args); fastlocals[i] = *args++; } result = PyEval_EvalFrameEx(f,0); ++tstate->recursion_depth; Py_DECREF(f); --tstate->recursion_depth; return result; } #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); PyObject *globals = PyFunction_GET_GLOBALS(func); PyObject *argdefs = PyFunction_GET_DEFAULTS(func); PyObject *closure; #if PY_MAJOR_VERSION >= 3 PyObject *kwdefs; #endif PyObject *kwtuple, **k; PyObject **d; Py_ssize_t nd; Py_ssize_t nk; PyObject *result; assert(kwargs == NULL || PyDict_Check(kwargs)); nk = kwargs ? 
PyDict_Size(kwargs) : 0; if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { return NULL; } if ( #if PY_MAJOR_VERSION >= 3 co->co_kwonlyargcount == 0 && #endif likely(kwargs == NULL || nk == 0) && co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { if (argdefs == NULL && co->co_argcount == nargs) { result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); goto done; } else if (nargs == 0 && argdefs != NULL && co->co_argcount == Py_SIZE(argdefs)) { /* function called with no arguments, but all parameters have a default value: use default values as arguments .*/ args = &PyTuple_GET_ITEM(argdefs, 0); result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); goto done; } } if (kwargs != NULL) { Py_ssize_t pos, i; kwtuple = PyTuple_New(2 * nk); if (kwtuple == NULL) { result = NULL; goto done; } k = &PyTuple_GET_ITEM(kwtuple, 0); pos = i = 0; while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { Py_INCREF(k[i]); Py_INCREF(k[i+1]); i += 2; } nk = i / 2; } else { kwtuple = NULL; k = NULL; } closure = PyFunction_GET_CLOSURE(func); #if PY_MAJOR_VERSION >= 3 kwdefs = PyFunction_GET_KW_DEFAULTS(func); #endif if (argdefs != NULL) { d = &PyTuple_GET_ITEM(argdefs, 0); nd = Py_SIZE(argdefs); } else { d = NULL; nd = 0; } #if PY_MAJOR_VERSION >= 3 result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, kwdefs, closure); #else result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, closure); #endif Py_XDECREF(kwtuple); done: Py_LeaveRecursiveCall(); return result; } #endif #endif /* PyObjectCall2Args */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { PyObject *args, *result = NULL; #if CYTHON_FAST_PYCALL if (PyFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyFunction_FastCall(function, args, 2); } #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyCFunction_FastCall(function, args, 2); } #endif args = PyTuple_New(2); if (unlikely(!args)) goto done; Py_INCREF(arg1); PyTuple_SET_ITEM(args, 0, arg1); Py_INCREF(arg2); PyTuple_SET_ITEM(args, 1, arg2); Py_INCREF(function); result = __Pyx_PyObject_Call(function, args, NULL); Py_DECREF(args); Py_DECREF(function); done: return result; } /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, &arg, 1); } #endif if 
(likely(PyCFunction_Check(func))) { if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); #if CYTHON_FAST_PYCCALL } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { return __Pyx_PyCFunction_FastCall(func, &arg, 1); #endif } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* BytesEquals */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else if (s1 == s2) { return (equals == Py_EQ); } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { const char *ps1, *ps2; Py_ssize_t length = PyBytes_GET_SIZE(s1); if (length != PyBytes_GET_SIZE(s2)) return (equals == Py_NE); ps1 = PyBytes_AS_STRING(s1); ps2 = PyBytes_AS_STRING(s2); if (ps1[0] != ps2[0]) { return (equals == Py_NE); } else if (length == 1) { return (equals == Py_EQ); } else { int result; #if CYTHON_USE_UNICODE_INTERNALS Py_hash_t hash1, hash2; hash1 = ((PyBytesObject*)s1)->ob_shash; hash2 = ((PyBytesObject*)s2)->ob_shash; if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { return (equals == Py_NE); } #endif result = memcmp(ps1, ps2, (size_t)length); return (equals == Py_EQ) ? (result == 0) : (result != 0); } } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { return (equals == Py_NE); } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { return (equals == Py_NE); } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } #endif } /* UnicodeEquals */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else #if PY_MAJOR_VERSION < 3 PyObject* owned_ref = NULL; #endif int s1_is_unicode, s2_is_unicode; if (s1 == s2) { goto return_eq; } s1_is_unicode = PyUnicode_CheckExact(s1); s2_is_unicode = PyUnicode_CheckExact(s2); #if PY_MAJOR_VERSION < 3 if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { owned_ref = PyUnicode_FromObject(s2); if (unlikely(!owned_ref)) return -1; s2 = owned_ref; s2_is_unicode = 1; } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { owned_ref = PyUnicode_FromObject(s1); if (unlikely(!owned_ref)) return -1; s1 = owned_ref; s1_is_unicode = 1; } else if (((!s2_is_unicode) & (!s1_is_unicode))) { return __Pyx_PyBytes_Equals(s1, s2, equals); } #endif if (s1_is_unicode & s2_is_unicode) { Py_ssize_t length; int kind; void *data1, *data2; if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) return -1; length = __Pyx_PyUnicode_GET_LENGTH(s1); if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { goto return_ne; } #if CYTHON_USE_UNICODE_INTERNALS { Py_hash_t hash1, hash2; #if CYTHON_PEP393_ENABLED hash1 = ((PyASCIIObject*)s1)->hash; hash2 = ((PyASCIIObject*)s2)->hash; #else hash1 = ((PyUnicodeObject*)s1)->hash; hash2 = ((PyUnicodeObject*)s2)->hash; #endif if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { goto return_ne; } } #endif kind = __Pyx_PyUnicode_KIND(s1); if (kind != __Pyx_PyUnicode_KIND(s2)) { goto return_ne; } data1 = __Pyx_PyUnicode_DATA(s1); data2 = 
__Pyx_PyUnicode_DATA(s2); if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { goto return_ne; } else if (length == 1) { goto return_eq; } else { int result = memcmp(data1, data2, (size_t)(length * kind)); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ) ? (result == 0) : (result != 0); } } else if ((s1 == Py_None) & s2_is_unicode) { goto return_ne; } else if ((s2 == Py_None) & s1_is_unicode) { goto return_ne; } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } return_eq: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ); return_ne: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_NE); #endif } /* GetAttr */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { #if CYTHON_USE_TYPE_SLOTS #if PY_MAJOR_VERSION >= 3 if (likely(PyUnicode_Check(n))) #else if (likely(PyString_Check(n))) #endif return __Pyx_PyObject_GetAttrStr(o, n); #endif return PyObject_GetAttr(o, n); } /* GetItemInt */ static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyList_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyTuple_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
i : i + PyTuple_GET_SIZE(o); if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return NULL; PyErr_Clear(); } } return m->sq_item(o, i); } } #else if (is_list || PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } /* ObjectGetItem */ #if CYTHON_USE_TYPE_SLOTS static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) { PyObject *runerr; Py_ssize_t key_value; PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence; if (unlikely(!(m && m->sq_item))) { PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name); return NULL; } key_value = __Pyx_PyIndex_AsSsize_t(index); if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); } if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { PyErr_Clear(); PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name); } return NULL; } static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) { PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping; if (likely(m && m->mp_subscript)) { return m->mp_subscript(obj, key); } return __Pyx_PyObject_GetIndex(obj, key); } #endif /* decode_c_string */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { Py_ssize_t length; if (unlikely((start < 0) | (stop < 0))) { size_t slen = strlen(cstring); if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) { PyErr_SetString(PyExc_OverflowError, "c-string too long to convert to Python"); return NULL; } length = (Py_ssize_t) slen; if (start < 0) { start += length; if (start < 0) start = 0; } if (stop < 0) stop += length; } if (unlikely(stop <= start)) return PyUnicode_FromUnicode(NULL, 0); length = stop - start; cstring += start; if (decode_func) { return decode_func(cstring, length, errors); } else { return PyUnicode_Decode(cstring, length, encoding, errors); } } /* PyErrExceptionMatches */ #if CYTHON_FAST_THREAD_STATE static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; } return 0; } static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { PyObject *exc_type = tstate->curexc_type; if (exc_type == err) return 1; if (unlikely(!exc_type)) return 0; if (unlikely(PyTuple_Check(err))) return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); } #endif /* GetAttr3 */ static PyObject *__Pyx_GetAttr3Default(PyObject *d) { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) return NULL; __Pyx_PyErr_Clear(); Py_INCREF(d); return d; } static CYTHON_INLINE PyObject 
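/* __Pyx_GetAttr3 (below) is the C counterpart of three-argument
 * getattr(o, n, d): it returns the attribute when the lookup succeeds and
 * falls back to the default only when the pending exception is
 * AttributeError (cleared in __Pyx_GetAttr3Default above); any other
 * exception propagates to the caller. */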
*__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) { PyObject *r = __Pyx_GetAttr(o, n); return (likely(r)) ? r : __Pyx_GetAttr3Default(d); } /* PyDictVersioning */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { PyObject *dict = Py_TYPE(obj)->tp_dict; return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; } static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { PyObject **dictptr = NULL; Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; if (offset) { #if CYTHON_COMPILING_IN_CPYTHON dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); #else dictptr = _PyObject_GetDictPtr(obj); #endif } return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0; } static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { PyObject *dict = Py_TYPE(obj)->tp_dict; if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) return 0; return obj_dict_version == __Pyx_get_object_dict_version(obj); } #endif /* GetModuleGlobalName */ #if CYTHON_USE_DICT_VERSIONS static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) #else static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) #endif { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } else if (unlikely(PyErr_Occurred())) { return NULL; } #else result = PyDict_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } #endif #else result = PyObject_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } PyErr_Clear(); #endif return __Pyx_GetBuiltinName(name); } /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } /* RaiseNeedMoreValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? 
"" : "s"); } /* RaiseNoneIterError */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } /* ExtTypeTest */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(__Pyx_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } /* GetTopmostException */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate) { _PyErr_StackItem *exc_info = tstate->exc_info; while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) && exc_info->previous_item != NULL) { exc_info = exc_info->previous_item; } return exc_info; } #endif /* SaveResetException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); *type = exc_info->exc_type; *value = exc_info->exc_value; *tb = exc_info->exc_traceback; #else *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; #endif Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); } static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = type; exc_info->exc_value = value; exc_info->exc_traceback = tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } #endif /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) #endif { PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE #if CYTHON_USE_EXC_INFO_STACK { _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = local_type; exc_info->exc_value = local_value; exc_info->exc_traceback = local_tb; } #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; 
tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* SwapException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = *type; exc_info->exc_value = *value; exc_info->exc_traceback = *tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = *type; tstate->exc_value = *value; tstate->exc_traceback = *tb; #endif *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); PyErr_SetExcInfo(*type, *value, *tb); *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #endif /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_MAJOR_VERSION < 3 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) { module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_MAJOR_VERSION < 3 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* FastTypeChecks */ #if CYTHON_COMPILING_IN_CPYTHON static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { while (a) { a = a->tp_base; if (a == b) return 1; } return b == &PyBaseObject_Type; } static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { PyObject *mro; if (a == b) return 1; mro = a->tp_mro; if (likely(mro)) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) return 1; } return 0; } return __Pyx_InBases(a, b); } #if PY_MAJOR_VERSION == 2 static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { PyObject *exception, *value, *tb; int res; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&exception, &value, &tb); res = exc_type1 ? 
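/* Import strategy used by __Pyx_Import above (sketch): with level == -1 on
   Python 3 it first attempts a package-relative import (level 1) when the
   importing module itself lives in a package, clears the ImportError on
   failure, and retries absolutely (level 0). Roughly, in Python terms:

       try:
           module = __import__(name, globals(), {}, fromlist, 1)
       except ImportError:
           module = __import__(name, globals(), {}, fromlist, 0)
*/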
PyObject_IsSubclass(err, exc_type1) : 0; if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } if (!res) { res = PyObject_IsSubclass(err, exc_type2); if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } } __Pyx_ErrRestore(exception, value, tb); return res; } #else static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; if (!res) { res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); } return res; } #endif static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; assert(PyExceptionClass_Check(exc_type)); n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { PyObject *t = PyTuple_GET_ITEM(tuple, i); #if PY_MAJOR_VERSION < 3 if (likely(exc_type == t)) return 1; #endif if (likely(PyExceptionClass_Check(t))) { if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1; } else { } } return 0; } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { if (likely(err == exc_type)) return 1; if (likely(PyExceptionClass_Check(err))) { if (likely(PyExceptionClass_Check(exc_type))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); } else if (likely(PyTuple_Check(exc_type))) { return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type); } else { } } return PyErr_GivenExceptionMatches(err, exc_type); } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { assert(PyExceptionClass_Check(exc_type1)); assert(PyExceptionClass_Check(exc_type2)); if (likely(err == exc_type1 || err == exc_type2)) return 1; if (likely(PyExceptionClass_Check(err))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); } return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); } #endif /* PyIntBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) { (void)inplace; (void)zerodivision_check; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = intval; long x; long a = PyInt_AS_LONG(op1); x = (long)((unsigned long)a + b); if (likely((x^a) >= 0 || (x^b) >= 0)) return PyInt_FromLong(x); return PyLong_Type.tp_as_number->nb_add(op1, op2); } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { const long b = intval; long a, x; #ifdef HAVE_LONG_LONG const PY_LONG_LONG llb = intval; PY_LONG_LONG lla, llx; #endif const digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); if (likely(__Pyx_sst_abs(size) <= 1)) { a = likely(size) ? 
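/* The overflow check in the PyInt fast path of __Pyx_PyInt_AddObjC (sketch):
   signed overflow is undefined in C, so the sum is computed through unsigned
   arithmetic, then accepted only if its sign matches at least one operand:

       x = (long)((unsigned long)a + b);
       if ((x^a) >= 0 || (x^b) >= 0)     // sign(x) == sign(a) or sign(b)
           return PyInt_FromLong(x);     // cannot have overflowed
       // else: defer to the arbitrary-precision nb_add slot

   Worked example: a = LONG_MAX, b = 1 wraps to LONG_MIN; x^a and x^b are both
   negative, so the slow path is taken. */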
digits[0] : 0; if (size == -1) a = -a; } else { switch (size) { case -2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; default: return PyLong_Type.tp_as_number->nb_add(op1, op2); } } x = a + b; return PyLong_FromLong(x); #ifdef HAVE_LONG_LONG long_long: llx = lla + llb; return PyLong_FromLongLong(llx); #endif } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); double result; PyFPE_START_PROTECT("add", return NULL) result = ((double)a) + (double)b; PyFPE_END_PROTECT(result) return PyFloat_FromDouble(result); } return (inplace ? 
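/* What the big switch above is doing (sketch, assuming CPython's PyLongObject
   layout): ints are stored as base-2^PyLong_SHIFT limbs (15 or 30 bits) in
   ob_digit[], with the sign carried by ob_size. For |ob_size| <= 4 the limbs
   are reassembled inline, in effect:

       long value = 0;
       for (k = nlimbs - 1; k >= 0; k--)
           value = (value << PyLong_SHIFT) | digits[k];
       if (ob_size < 0) value = -value;

   The `8 * sizeof(long) - 1 > n * PyLong_SHIFT` guards make sure the shifts
   fit a signed long; anything wider falls back to nb_add. */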
PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); } #endif /* None */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); } /* ImportFrom */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Format(PyExc_ImportError, #if PY_MAJOR_VERSION < 3 "cannot import name %.230s", PyString_AS_STRING(name)); #else "cannot import name %S", name); #endif } return value; } /* HasAttr */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) { PyObject *r; if (unlikely(!__Pyx_PyBaseString_Check(n))) { PyErr_SetString(PyExc_TypeError, "hasattr(): attribute name must be string"); return -1; } r = __Pyx_GetAttr(o, n); if (unlikely(!r)) { PyErr_Clear(); return 0; } else { Py_DECREF(r); return 1; } } /* PyObject_GenericGetAttrNoDict */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { PyErr_Format(PyExc_AttributeError, #if PY_MAJOR_VERSION >= 3 "'%.50s' object has no attribute '%U'", tp->tp_name, attr_name); #else "'%.50s' object has no attribute '%.400s'", tp->tp_name, PyString_AS_STRING(attr_name)); #endif return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { PyObject *descr; PyTypeObject *tp = Py_TYPE(obj); if (unlikely(!PyString_Check(attr_name))) { return PyObject_GenericGetAttr(obj, attr_name); } assert(!tp->tp_dictoffset); descr = _PyType_Lookup(tp, attr_name); if (unlikely(!descr)) { return __Pyx_RaiseGenericGetAttributeError(tp, attr_name); } Py_INCREF(descr); #if PY_MAJOR_VERSION < 3 if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) #endif { descrgetfunc f = Py_TYPE(descr)->tp_descr_get; if (unlikely(f)) { PyObject *res = f(descr, obj, (PyObject *)tp); Py_DECREF(descr); return res; } } return descr; } #endif /* PyObject_GenericGetAttr */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) { if (unlikely(Py_TYPE(obj)->tp_dictoffset)) { return PyObject_GenericGetAttr(obj, attr_name); } return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name); } #endif /* SetVTable */ static int __Pyx_SetVtable(PyObject *dict, void *vtable) { #if PY_VERSION_HEX >= 0x02070000 PyObject *ob = PyCapsule_New(vtable, 0, 0); #else PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); #endif if (!ob) goto bad; if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) goto bad; Py_DECREF(ob); return 0; bad: Py_XDECREF(ob); return -1; } /* PyObjectGetAttrStrNoError */ static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) __Pyx_PyErr_Clear(); } static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) { PyObject *result; #if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1 PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) { return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1); } #endif result = __Pyx_PyObject_GetAttrStr(obj, attr_name); if (unlikely(!result)) { 
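/* The vtable capsule pattern used by __Pyx_SetVtable above (sketch): a struct
   of C function pointers is hidden in the type dict under "__pyx_vtable__",
   wrapped in a PyCapsule (PyCObject before CPython 2.7). A reader would
   unwrap it roughly like this (illustrative, error handling elided):

       PyObject *cap = PyDict_GetItem(tp->tp_dict, __pyx_n_s_pyx_vtable);
       void *vtable = cap ? PyCapsule_GetPointer(cap, 0) : NULL;

   cdef-class methods can then be called C-to-C without attribute lookups. */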
__Pyx_PyObject_GetAttrStr_ClearAttributeError(); } return result; } /* SetupReduce */ static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { int ret; PyObject *name_attr; name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2); if (likely(name_attr)) { ret = PyObject_RichCompareBool(name_attr, name, Py_EQ); } else { ret = -1; } if (unlikely(ret < 0)) { PyErr_Clear(); ret = 0; } Py_XDECREF(name_attr); return ret; } static int __Pyx_setup_reduce(PyObject* type_obj) { int ret = 0; PyObject *object_reduce = NULL; PyObject *object_reduce_ex = NULL; PyObject *reduce = NULL; PyObject *reduce_ex = NULL; PyObject *reduce_cython = NULL; PyObject *setstate = NULL; PyObject *setstate_cython = NULL; #if CYTHON_USE_PYTYPE_LOOKUP if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; #else if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; #endif #if CYTHON_USE_PYTYPE_LOOKUP object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; #else object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; #endif reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD; if (reduce_ex == object_reduce_ex) { #if CYTHON_USE_PYTYPE_LOOKUP object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; #else object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; #endif reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD; if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython); if (likely(reduce_cython)) { ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; } else if (reduce == object_reduce || PyErr_Occurred()) { goto __PYX_BAD; } setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate); if (!setstate) PyErr_Clear(); if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) { setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython); if (likely(setstate_cython)) { ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; } else if (!setstate || PyErr_Occurred()) { goto __PYX_BAD; } } PyType_Modified((PyTypeObject*)type_obj); } } goto __PYX_GOOD; __PYX_BAD: if (!PyErr_Occurred()) PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name); ret = -1; __PYX_GOOD: #if !CYTHON_USE_PYTYPE_LOOKUP Py_XDECREF(object_reduce); Py_XDECREF(object_reduce_ex); #endif Py_XDECREF(reduce); Py_XDECREF(reduce_ex); Py_XDECREF(reduce_cython); Py_XDECREF(setstate); Py_XDECREF(setstate_cython); return ret; } /* CLineInTraceback */ #ifndef CYTHON_CLINE_IN_TRACEBACK static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) { PyObject *use_cline; PyObject *ptype, *pvalue, 
*ptraceback; #if CYTHON_COMPILING_IN_CPYTHON PyObject **cython_runtime_dict; #endif if (unlikely(!__pyx_cython_runtime)) { return c_line; } __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); #if CYTHON_COMPILING_IN_CPYTHON cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); if (likely(cython_runtime_dict)) { __PYX_PY_DICT_LOOKUP_IF_MODIFIED( use_cline, *cython_runtime_dict, __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) } else #endif { PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); if (use_cline_obj) { use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True; Py_DECREF(use_cline_obj); } else { PyErr_Clear(); use_cline = NULL; } } if (!use_cline) { c_line = 0; PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); } else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { c_line = 0; } __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); return c_line; } #endif /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" 
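/* Shape of the cache above (sketch): __pyx_code_cache keeps (code_line,
   PyCodeObject*) pairs in an array sorted by code_line and grown in blocks
   of 64. __pyx_bisect_code_objects is a plain binary search that returns the
   insertion point, so a lookup is:

       pos = __pyx_bisect_code_objects(entries, count, line);
       hit = (pos < count) && (entries[pos].code_line == line);

   Entries own strong references; replacing a slot DECREFs the old object. */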
#include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; PyThreadState *tstate = __Pyx_PyThreadState_Current; if (c_line) { c_line = __Pyx_CLineForTraceback(tstate, c_line); } py_code = __pyx_find_code_object(c_line ? -c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? 
-c_line : py_line, py_code); } py_frame = PyFrame_New( tstate, /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; __Pyx_PyFrame_SetLineNumber(py_frame, py_line); PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags); PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } if ((0)) {} view->obj = NULL; Py_DECREF(obj); } #endif /* MemviewSliceIsContig */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim) { int i, index, step, start; Py_ssize_t itemsize = mvs.memview->view.itemsize; if (order == 'F') { step = 1; start = 0; } else { step = -1; start = ndim - 1; } for (i = 0; i < ndim; i++) { index = start + step * i; if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize) return 0; itemsize *= mvs.shape[index]; } return 1; } /* OverlappingSlices */ static void __pyx_get_array_memory_extents(__Pyx_memviewslice *slice, void **out_start, void **out_end, int ndim, size_t itemsize) { char *start, *end; int i; start = end = slice->data; for (i = 0; i < ndim; i++) { Py_ssize_t stride = slice->strides[i]; Py_ssize_t extent = slice->shape[i]; if (extent == 0) { *out_start = *out_end = start; return; } else { if (stride > 0) end += stride * (extent - 1); else start += stride * (extent - 1); } } *out_start = start; *out_end = end + itemsize; } static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize) { void *start1, *end1, *start2, *end2; __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize); __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize); return (start1 < end2) && (start2 < end1); } /* Capsule */ static CYTHON_INLINE PyObject * __pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig) { PyObject *cobj; #if PY_VERSION_HEX >= 0x02070000 cobj = PyCapsule_New(p, sig, NULL); #else cobj = PyCObject_FromVoidPtr(p, NULL); #endif return cobj; } /* IsLittleEndian */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) { union { uint32_t u32; uint8_t u8[4]; } S; S.u32 = 0x01020304; return S.u8[0] == 4; } /* BufferFormatCheck */ static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if 
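/* Contiguity math in __pyx_memviewslice_is_contig above (worked example): the
   loop visits dimensions innermost first ('C': last axis, 'F': first axis),
   requiring stride == accumulated itemsize, then multiplying in the extent.
   For shape {3, 4}, itemsize 8, order 'C':

       axis 1: stride must be  8; accumulator becomes  8 * 4 = 32
       axis 0: stride must be 32; accumulator becomes 32 * 3 = 96

   i.e. C-contiguous strides are exactly {32, 8}. Any suboffset >= 0 (an
   indirect axis) fails the test immediately. */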
(*t < '0' || *t > '9') {
        return -1;
    } else {
        count = *t++ - '0';
        while (*t >= '0' && *t <= '9') {
            count *= 10;
            count += *t++ - '0';
        }
    }
    *ts = t;
    return count;
}
static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
    int number = __Pyx_BufFmt_ParseNumber(ts);
    if (number == -1)
        PyErr_Format(PyExc_ValueError,
                     "Does not understand character buffer dtype format string ('%c')", **ts);
    return number;
}
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
    PyErr_Format(PyExc_ValueError,
                 "Unexpected format string character: '%c'", ch);
}
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
    switch (ch) {
        case '?': return "'bool'";
        case 'c': return "'char'";
        case 'b': return "'signed char'";
        case 'B': return "'unsigned char'";
        case 'h': return "'short'";
        case 'H': return "'unsigned short'";
        case 'i': return "'int'";
        case 'I': return "'unsigned int'";
        case 'l': return "'long'";
        case 'L': return "'unsigned long'";
        case 'q': return "'long long'";
        case 'Q': return "'unsigned long long'";
        case 'f': return (is_complex ? "'complex float'" : "'float'");
        case 'd': return (is_complex ? "'complex double'" : "'double'");
        case 'g': return (is_complex ? "'complex long double'" : "'long double'");
        case 'T': return "a struct";
        case 'O': return "Python object";
        case 'P': return "a pointer";
        case 's': case 'p': return "a string";
        case 0: return "end";
        default: return "unparseable format string";
    }
}
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
    switch (ch) {
        case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
        case 'h': case 'H': return 2;
        case 'i': case 'I': case 'l': case 'L': return 4;
        case 'q': case 'Q': return 8;
        case 'f': return (is_complex ? 8 : 4);
        case 'd': return (is_complex ? 16 : 8);
        case 'g': {
            PyErr_SetString(PyExc_ValueError,
                            "Python does not define a standard format string size for long double ('g').");
            return 0;
        }
        case 'O': case 'P': return sizeof(void*);
        default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0;
    }
}
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
    switch (ch) {
        case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
        case 'h': case 'H': return sizeof(short);
        case 'i': case 'I': return sizeof(int);
        case 'l': case 'L': return sizeof(long);
#ifdef HAVE_LONG_LONG
        case 'q': case 'Q': return sizeof(PY_LONG_LONG);
#endif
        case 'f': return sizeof(float) * (is_complex ? 2 : 1);
        case 'd': return sizeof(double) * (is_complex ? 2 : 1);
        case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
        case 'O': case 'P': return sizeof(void*);
        default: {
            __Pyx_BufFmt_RaiseUnexpectedChar(ch);
            return 0;
        }
    }
}
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
    switch (ch) {
        case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
        case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
        case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
        case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
        case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
        case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
        case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
        case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
        case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
        default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0;
    }
}
/* These are for computing the padding at the end of the struct to align
   on the first member of the struct. This will probably be the same as above,
   but we don't have any guarantees. */
typedef struct { short x; char c; } __Pyx_pad_short;
typedef struct { int x; char c; } __Pyx_pad_int;
typedef struct { long x; char c; } __Pyx_pad_long;
typedef struct { float x; char c; } __Pyx_pad_float;
typedef struct { double x; char c; } __Pyx_pad_double;
typedef struct { long double x; char c; } __Pyx_pad_longdouble;
typedef struct { void *x; char c; } __Pyx_pad_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
    switch (ch) {
        case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
        case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
        case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
        case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
        case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
#endif
        case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
        case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
        case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
        case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
        default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0;
    }
}
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
    switch (ch) {
        case 'c': return 'H';
        case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I';
        case '?': case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U';
        case 'f': case 'd': case 'g': return (is_complex ?
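/* Why the helper structs above yield alignments (sketch, the classic pre-C11
   trick): in struct { char c; T x; } the compiler pads after `c` so that `x`
   is properly aligned, hence

       sizeof(struct { char c; T x; }) - sizeof(T)  ==  alignment of T

   on common ABIs; e.g. T = double on x86-64 gives 16 - 8 = 8, matching
   _Alignof(double) in C11. The __Pyx_pad_* twins measure the tail padding
   needed after the last struct member the same way. */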
'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = 
ctx->head->field;
                continue;
            } else if (field->type->typegroup == 'S') {
                size_t parent_offset = ctx->head->parent_offset + field->offset;
                if (field->type->fields->type == NULL) continue;
                field = field->type->fields;
                ++ctx->head;
                ctx->head->field = field;
                ctx->head->parent_offset = parent_offset;
                break;
            } else {
                break;
            }
        }
    } while (ctx->enc_count);
    ctx->enc_type = 0;
    ctx->is_complex = 0;
    return 0;
}
static PyObject *
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
{
    const char *ts = *tsp;
    int i = 0, number, ndim;
    ++ts;
    if (ctx->new_count != 1) {
        PyErr_SetString(PyExc_ValueError,
                        "Cannot handle repeated arrays in format string");
        return NULL;
    }
    if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
    ndim = ctx->head->field->type->ndim;
    while (*ts && *ts != ')') {
        switch (*ts) {
            case ' ': case '\f': case '\r': case '\n': case '\t': case '\v':
                ++ts;  /* skip whitespace */
                continue;
            default:
                break;
        }
        number = __Pyx_BufFmt_ExpectNumber(&ts);
        if (number == -1) return NULL;
        if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
            return PyErr_Format(PyExc_ValueError,
                                "Expected a dimension of size %zu, got %d",
                                ctx->head->field->type->arraysize[i], number);
        if (*ts != ',' && *ts != ')')
            return PyErr_Format(PyExc_ValueError,
                                "Expected a comma in format string, got '%c'", *ts);
        if (*ts == ',') ts++;
        i++;
    }
    if (i != ndim)
        return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
                            ctx->head->field->type->ndim, i);
    if (!*ts) {
        PyErr_SetString(PyExc_ValueError,
                        "Unexpected end of format string, expected ')'");
        return NULL;
    }
    ctx->is_valid_array = 1;
    ctx->new_count = 1;
    *tsp = ++ts;
    return Py_None;
}
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
    int got_Z = 0;
    while (1) {
        switch(*ts) {
            case 0:
                if (ctx->enc_type != 0 && ctx->head == NULL) {
                    __Pyx_BufFmt_RaiseExpected(ctx);
                    return NULL;
                }
                if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
                if (ctx->head != NULL) {
                    __Pyx_BufFmt_RaiseExpected(ctx);
                    return NULL;
                }
                return ts;
            case ' ':
            case '\r':
            case '\n':
                ++ts;
                break;
            case '<':
                if (!__Pyx_Is_Little_Endian()) {
                    PyErr_SetString(PyExc_ValueError,
                                    "Little-endian buffer not supported on big-endian compiler");
                    return NULL;
                }
                ctx->new_packmode = '=';
                ++ts;
                break;
            case '>':
            case '!':
                if (__Pyx_Is_Little_Endian()) {
                    PyErr_SetString(PyExc_ValueError,
                                    "Big-endian buffer not supported on little-endian compiler");
                    return NULL;
                }
                ctx->new_packmode = '=';
                ++ts;
                break;
            case '=':
            case '@':
            case '^':
                ctx->new_packmode = *ts++;
                break;
            case 'T': {
                const char* ts_after_sub;
                size_t i, struct_count = ctx->new_count;
                size_t struct_alignment = ctx->struct_alignment;
                ctx->new_count = 1;
                ++ts;
                if (*ts != '{') {
                    PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
                    return NULL;
                }
                if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
                ctx->enc_type = 0;
                ctx->enc_count = 0;
                ctx->struct_alignment = 0;
                ++ts;
                ts_after_sub = ts;
                for (i = 0; i != struct_count; ++i) {
                    ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
                    if (!ts_after_sub) return NULL;
                }
                ts = ts_after_sub;
                if (struct_alignment) ctx->struct_alignment = struct_alignment;
            }
            break;
            case '}': {
                size_t alignment = ctx->struct_alignment;
                ++ts;
                if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
                ctx->enc_type = 0;
                if (alignment && ctx->fmt_offset % alignment) {
                    ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
                }
            }
            return ts;
            case 'x':
                if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
                ctx->fmt_offset += ctx->new_count;
                ctx->new_count = 1;
                ctx->enc_count = 0;
ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } CYTHON_FALLTHROUGH; case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if ((ctx->enc_type == *ts) && (got_Z == ctx->is_complex) && (ctx->enc_packmode == ctx->new_packmode) && (!ctx->is_valid_array)) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } CYTHON_FALLTHROUGH; case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } /* TypeInfoCompare */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) { int i; if (!a || !b) return 0; if (a == b) return 1; if (a->size != b->size || a->typegroup != b->typegroup || a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { if (a->typegroup == 'H' || b->typegroup == 'H') { return a->size == b->size; } else { return 0; } } if (a->ndim) { for (i = 0; i < a->ndim; i++) if (a->arraysize[i] != b->arraysize[i]) return 0; } if (a->typegroup == 'S') { if (a->flags != b->flags) return 0; if (a->fields || b->fields) { if (!(a->fields && b->fields)) return 0; for (i = 0; a->fields[i].type && b->fields[i].type; i++) { __Pyx_StructField *field_a = a->fields + i; __Pyx_StructField *field_b = b->fields + i; if (field_a->offset != field_b->offset || !__pyx_typeinfo_cmp(field_a->type, field_b->type)) return 0; } return !a->fields[i].type && !b->fields[i].type; } } return 1; } /* MemviewSliceValidateAndInit */ static int __pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) { if (buf->shape[dim] <= 1) return 1; if (buf->strides) { if (spec & __Pyx_MEMVIEW_CONTIG) { if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { if (unlikely(buf->strides[dim] != sizeof(void *))) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly contiguous " "in dimension %d.", dim); goto fail; } } else if (unlikely(buf->strides[dim] != buf->itemsize)) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } if (spec & __Pyx_MEMVIEW_FOLLOW) { Py_ssize_t stride = buf->strides[dim]; if (stride < 0) stride = -stride; if (unlikely(stride < buf->itemsize)) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } } else { if (unlikely(spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1)) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not contiguous in " "dimension %d", dim); goto fail; } else if (unlikely(spec & (__Pyx_MEMVIEW_PTR))) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not indirect in " "dimension %d", dim); goto fail; } else if (unlikely(buf->suboffsets)) { PyErr_SetString(PyExc_ValueError, "Buffer exposes suboffsets but no strides"); goto fail; } } return 1; fail: return 0; } static int __pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec) { if (spec & __Pyx_MEMVIEW_DIRECT) { if (unlikely(buf->suboffsets && 
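/* The per-axis specs being checked here (sketch): every memoryview dimension
   carries flags: __Pyx_MEMVIEW_DIRECT (no pointer indirection),
   __Pyx_MEMVIEW_PTR (indirect via suboffsets), __Pyx_MEMVIEW_CONTIG
   (stride == itemsize) and __Pyx_MEMVIEW_FOLLOW (any stride). A declaration
   like double[:, ::1] therefore validates against

       { DIRECT|FOLLOW, DIRECT|CONTIG }      // 2-D, C-contiguous

   and __pyx_check_strides/__pyx_check_suboffsets reject any Py_buffer whose
   metadata cannot satisfy those flags. */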
buf->suboffsets[dim] >= 0)) { PyErr_Format(PyExc_ValueError, "Buffer not compatible with direct access " "in dimension %d.", dim); goto fail; } } if (spec & __Pyx_MEMVIEW_PTR) { if (unlikely(!buf->suboffsets || (buf->suboffsets[dim] < 0))) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly accessible " "in dimension %d.", dim); goto fail; } } return 1; fail: return 0; } static int __pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) { int i; if (c_or_f_flag & __Pyx_IS_F_CONTIG) { Py_ssize_t stride = 1; for (i = 0; i < ndim; i++) { if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { PyErr_SetString(PyExc_ValueError, "Buffer not fortran contiguous."); goto fail; } stride = stride * buf->shape[i]; } } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { Py_ssize_t stride = 1; for (i = ndim - 1; i >- 1; i--) { if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { PyErr_SetString(PyExc_ValueError, "Buffer not C contiguous."); goto fail; } stride = stride * buf->shape[i]; } } return 1; fail: return 0; } static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj) { struct __pyx_memoryview_obj *memview, *new_memview; __Pyx_RefNannyDeclarations Py_buffer *buf; int i, spec = 0, retval = -1; __Pyx_BufFmt_Context ctx; int from_memoryview = __pyx_memoryview_check(original_obj); __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) original_obj)->typeinfo)) { memview = (struct __pyx_memoryview_obj *) original_obj; new_memview = NULL; } else { memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( original_obj, buf_flags, 0, dtype); new_memview = memview; if (unlikely(!memview)) goto fail; } buf = &memview->view; if (unlikely(buf->ndim != ndim)) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", ndim, buf->ndim); goto fail; } if (new_memview) { __Pyx_BufFmt_Init(&ctx, stack, dtype); if (unlikely(!__Pyx_BufFmt_CheckString(&ctx, buf->format))) goto fail; } if (unlikely((unsigned) buf->itemsize != dtype->size)) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, dtype->size, (dtype->size > 1) ? 
"s" : ""); goto fail; } for (i = 0; i < ndim; i++) { spec = axes_specs[i]; if (unlikely(!__pyx_check_strides(buf, i, ndim, spec))) goto fail; if (unlikely(!__pyx_check_suboffsets(buf, i, ndim, spec))) goto fail; } if (unlikely(buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag))) goto fail; if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, new_memview != NULL) == -1)) { goto fail; } retval = 0; goto no_fail; fail: Py_XDECREF(new_memview); retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 2, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_double(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_int(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0, PyBUF_RECORDS_RO | writable_flag, 1, &__Pyx_TypeInfo_int, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_int(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 2, &__Pyx_TypeInfo_int, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static 
CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1, &__Pyx_TypeInfo_int, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* MemviewSliceCopyTemplate */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object) { __Pyx_RefNannyDeclarations int i; __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } }; struct __pyx_memoryview_obj *from_memview = from_mvs->memview; Py_buffer *buf = &from_memview->view; PyObject *shape_tuple = NULL; PyObject *temp_int = NULL; struct __pyx_array_obj *array_obj = NULL; struct __pyx_memoryview_obj *memview_obj = NULL; __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0); for (i = 0; i < ndim; i++) { if (unlikely(from_mvs->suboffsets[i] >= 0)) { PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with " "indirect dimensions (axis %d)", i); goto fail; } } shape_tuple = PyTuple_New(ndim); if (unlikely(!shape_tuple)) { goto fail; } __Pyx_GOTREF(shape_tuple); for(i = 0; i < ndim; i++) { temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); if(unlikely(!temp_int)) { goto fail; } else { PyTuple_SET_ITEM(shape_tuple, i, temp_int); temp_int = NULL; } } array_obj = __pyx_array_new(shape_tuple, 
sizeof_dtype, buf->format, (char *) mode, NULL); if (unlikely(!array_obj)) { goto fail; } __Pyx_GOTREF(array_obj); memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( (PyObject *) array_obj, contig_flag, dtype_is_object, from_mvs->memview->typeinfo); if (unlikely(!memview_obj)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) goto fail; if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim, dtype_is_object) < 0)) goto fail; goto no_fail; fail: __Pyx_XDECREF(new_mvs.memview); new_mvs.memview = NULL; new_mvs.data = NULL; no_fail: __Pyx_XDECREF(shape_tuple); __Pyx_XDECREF(temp_int); __Pyx_XDECREF(array_obj); __Pyx_RefNannyFinishContext(); return new_mvs; } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: 
__PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, 
sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned 
long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = 
__Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { const char neg_one = (char) ((char) 0 - (char) 1), const_zero = (char) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(char) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (char) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0]) case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) { return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) { return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) { return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) 
return (char) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(char) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0]) case -2: if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -3: if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -4: if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; } #endif if (sizeof(char) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= 
sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else char val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (char) -1; } } else { char val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (char) -1; val = __Pyx_PyInt_As_char(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to char"); return (char) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to char"); return (char) -1; } /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; if (PyObject_Hash(*t->p) == -1) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT #if !CYTHON_PEP393_ENABLED static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; } #else static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if 
(likely(PyUnicode_IS_ASCII(o))) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif } #endif #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { return __Pyx_PyUnicode_AsStringAndSize(o, length); } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { int retval; if (unlikely(!x)) return -1; retval = __Pyx_PyObject_IsTrue(x); Py_DECREF(x); return retval; } static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { #if PY_MAJOR_VERSION >= 3 if (PyLong_Check(result)) { if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, "__int__ returned non-int (type %.200s). " "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of Python.", Py_TYPE(result)->tp_name)) { Py_DECREF(result); return NULL; } return result; } #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", type_name, type_name, Py_TYPE(result)->tp_name); Py_DECREF(result); return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x) || PyLong_Check(x))) #else if (likely(PyLong_Check(x))) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = m->nb_int(x); } else if (m && m->nb_long) { name = "long"; res = m->nb_long(x); } #else if (likely(m && m->nb_int)) { name = "int"; res = m->nb_int(x); } #endif #else if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { res = PyNumber_Int(x); } #endif if (likely(res)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { #else if (unlikely(!PyLong_CheckExact(res))) { #endif return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(b); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? 
digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */
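The CIntToPy/CIntFromPy helpers above all share the same byte-array fallback: probe the host's endianness with a one-byte read, then hand the raw bytes to _PyLong_FromByteArray/_PyLong_AsByteArray. A standalone sketch of just that probe (illustrative only, not part of the generated module):

/* Standalone illustration of the one-byte endianness probe used by the
   generated conversion helpers before calling _PyLong_FromByteArray. */
#include <stdio.h>

int main(void) {
    int one = 1;
    /* On a little-endian host the lowest-addressed byte of `one` is 1. */
    int little = (int)*(unsigned char *)&one;
    printf("host is %s-endian\n", little ? "little" : "big");
    return 0;
}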
task_nested_lvl2_omp.c
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
 * See LICENSE.txt in top-level directory.
 */

#include <assert.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define NUM_TASKS 50000
#define NUM_REPS 1
#define LEVELS 2

int o = 0;

void sscal(float value, float *a) {
    *a = *a * value;
}

void na(float value) {
    /* Tasks call this concurrently, so the counter update must be atomic. */
#pragma omp atomic
    o++;
}

void presscal(float value, float *a, int lvl, int i) {
    if (lvl > 1) {
        lvl--;
#pragma omp task
        { presscal(value, a, lvl, i); }
#pragma omp task
        { presscal(value, a, lvl, i); }
    } else {
#pragma omp task
        { sscal(value, a); }
#pragma omp task
        { na(value); }
    }
}

int main(int argc, char *argv[]) {
    int i, r, nthreads;
    double *time, avg_time = 0.0;
    char *str, *endptr;
    float *a;
    double time2 = 0.0;

#pragma omp parallel
    {
#pragma omp master
        { nthreads = omp_get_num_threads(); }
    }

    if (argc > 1) {
        str = argv[1];
    }
    int ntasks = argc > 1 ? (int)strtoll(str, &endptr, 10) : NUM_TASKS;
    int lvl = (argc > 2) ? atoi(argv[2]) : LEVELS;
    int rep = (argc > 3) ? atoi(argv[3]) : NUM_REPS;

    time = malloc(sizeof(double) * rep);
    a = malloc(sizeof(float) * ntasks);
    for (i = 0; i < ntasks; i++) {
        a[i] = i + 100.0f;
    }

    for (r = 0; r < rep; r++) {
        time[r] = omp_get_wtime();
#pragma omp parallel
        {
#pragma omp single
            {
                time2 = omp_get_wtime();
                for (i = 0; i < ntasks; i++) {
#pragma omp task firstprivate(i)
                    { presscal(0.9f, &a[i], lvl, i); }
                }
                time2 = omp_get_wtime() - time2;
            }
        }
        time[r] = omp_get_wtime() - time[r];
        avg_time += time[r];
    }

    // TODO: Just works with one repetition
    for (i = 0; i < ntasks; i++) {
        if (a[i] != (i + 100.0f) * 0.9f) {
            printf("error: a[%d]=%2.f expected %2.f\n", i, a[i], (i + 100.0f) * 0.9f);
        }
    }
    avg_time /= rep;
    printf("nthreads: %d\nntasks: %d\nTime(s):%f\nCreation Time: %f\n", nthreads, ntasks, avg_time, time2);
    printf("o=%d should equal %d\n", o, ntasks);
    return EXIT_SUCCESS;
}
matrixadd_row.c
#include <stdio.h>
#include <stdlib.h>
#include "generic.h"

#define size 10000
#define NT 8

int A[size][size];
int B[size][size];
int C[size][size];
int flag[size]; /* unused in this program */

int main(int argc, char *argv[]) {
    if (argc < 2) {
        fprintf(stderr, "usage: %s <integer-seed-value>\n", argv[0]);
        return 1;
    }
    srand(atoi(argv[1])); /* seed taken from the command-line integer value */

    /* Fill A and B with random values. */
    for (int i = 0; i < size; i++) {
        for (int j = 0; j < size; j++) {
            A[i][j] = rand() % 1048576;
            B[i][j] = rand() % 1048576;
        }
    }

    double t1 = rtclock();
#pragma omp parallel for num_threads(NT)
    for (int i = 0; i < size; i++)
        for (int j = 0; j < size; j++)
            C[i][j] = A[i][j] + B[i][j];
    double t2 = rtclock();

    printf("\nTIME =%f \n", (t2 - t1) * 1000);
}
/* Run: executable-path <integer-seed-value>
 * example: ./a.out 3
 */
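matrixadd_row.c relies on rtclock() from generic.h, which is not reproduced in this collection. A minimal sketch of a typical gettimeofday-based implementation (an assumption about that helper, not the actual header):

/* Hypothetical sketch of the rtclock() helper assumed above;
   returns wall-clock time in seconds as a double. */
#include <stdio.h>
#include <sys/time.h>

double rtclock(void) {
    struct timeval tv;
    gettimeofday(&tv, NULL);                 /* wall-clock time since the epoch */
    return tv.tv_sec + tv.tv_usec * 1.0e-6;  /* seconds as a double */
}

int main(void) {
    printf("now: %f s\n", rtclock());
    return 0;
}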
predict.c
/**
 * @file
 * @author Ryan Moore
 * @brief I contain the predict function for ONF.
 */

#include <assert.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>

#include <rya.h>

#include "onf.h"
#include "predict.h"
#include "array.h"
#include "const.h"
#include "file.h"
#include "tommyarray.h"
#include "rlib.h"
#include "onf_math.h"

#ifdef OPENMP
#include <omp.h>
#endif

#define PSEUDO_COUNT 16
#define TODO_VIRUS_GENOME_LENGTH 32000

int main(int argc, char* argv[])
{
  if (argc != 5) {
    fprintf(stderr, "\n\nonf %s\n\n", ONF_VERSION);
    fprintf(stderr, "usage: %s num_threads /dir/with/host/counts /dir/with/virus/counts outdir\n", argv[0]);

    return 1;
  }

  char* arg_num_threads = argv[1];
  char* arg_host_dir    = argv[2];
  char* arg_virus_dir   = argv[3];
  char* arg_output_dir  = argv[4];

  // Make the outdir if it does not exist.
  struct stat st;

  // Check for existence.
  if (stat(arg_output_dir, &st) < 0) {
    // If not, create the directory.
    // read/write/search permissions for owner and group.
    // read/search permissions for others.
    if (mkdir(arg_output_dir, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH) != 0) {
      fprintf(stderr, "ERROR -- cannot create directory %s\n", arg_output_dir);

      return 1;
    }
  }

#ifdef OPENMP
  // Honor the num_threads command-line argument.
  omp_set_num_threads(atoi(arg_num_threads));
#endif

  // Get count file names.
  tommy_array* host_fnames = onf_file_files_in_dir(arg_host_dir);
  assert(host_fnames);
  size_t num_input_files_host = tommy_array_size(host_fnames);

  tommy_array* virus_fnames = onf_file_files_in_dir(arg_virus_dir);
  assert(virus_fnames);
  size_t num_input_files_virus = tommy_array_size(virus_fnames);

  tommy_array* host_counts = malloc(sizeof(tommy_array));
  assert(host_counts);
  tommy_array_init(host_counts);

  tommy_array* virus_counts = malloc(sizeof(tommy_array));
  assert(virus_counts);
  tommy_array_init(virus_counts);

  for (int i = 0; i < num_input_files_host; ++i) {
    char* fname = tommy_array_get(host_fnames, i);
    assert(fname);

    // read counts
    struct onf_rya_int_array** counts = onf_read_counts2(fname);
    assert(counts != ONF_ERROR_PTR);

    struct count_array_info* count_info = malloc(sizeof(struct count_array_info));
    assert(count_info);

    count_info->fname  = fname;
    count_info->counts = counts;

    tommy_array_insert(host_counts, count_info);
  }

  // Do the same thing for the viruses.
  for (int i = 0; i < num_input_files_virus; ++i) {
    char* fname = tommy_array_get(virus_fnames, i);
    assert(fname);

    // read counts
    struct onf_rya_int_array** counts = onf_read_counts2(fname);
    assert(counts);

    struct count_array_info* count_info = malloc(sizeof(struct count_array_info));
    assert(count_info);

    count_info->fname  = fname;
    count_info->counts = counts;

    tommy_array_insert(virus_counts, count_info);
  }

  // TODO technically, I don't have to save all the viral counts. I can just
  // do the calculation after reading each virus file.
  int z = 0;

  assert(num_input_files_virus == tommy_array_size(virus_counts));
  assert(num_input_files_host == tommy_array_size(host_counts));

  fprintf(stdout, "vir\thost\tll\n");

#pragma omp parallel for schedule(auto) private(z)
  for (z = 0; z < num_input_files_virus; ++z) {
    struct count_array_info* virus_count_info = tommy_array_get(virus_counts, z);

    for (int i = 0; i < num_input_files_host; ++i) {
      struct count_array_info* host_count_info = tommy_array_get(host_counts, i);

      // Calculate score.  TODO: replace PSEUDO_COUNT and the 32,000
      // genome-length placeholder with actual values.
      double log_lik = score((host_count_info->counts), (virus_count_info->counts), PSEUDO_COUNT);

      fprintf(stdout, "%s\t%s\t%lf\n", (virus_count_info->fname), (host_count_info->fname), log_lik);
    }
  }
}
zlange.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> s d c
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"

/***************************************************************************//**
 *
 * @ingroup plasma_lange
 *
 *  Returns the norm of a general matrix as
 *
 *     zlange = ( max(abs(A(i,j))), NORM = PlasmaMaxNorm
 *              (
 *              ( norm1(A),         NORM = PlasmaOneNorm
 *              (
 *              ( normI(A),         NORM = PlasmaInfNorm
 *              (
 *              ( normF(A),         NORM = PlasmaFrobeniusNorm
 *
 *  where norm1 denotes the one norm of a matrix (maximum column sum),
 *  normI denotes the infinity norm of a matrix (maximum row sum) and
 *  normF denotes the Frobenius norm of a matrix (square root of sum
 *  of squares). Note that max(abs(A(i,j))) is not a consistent matrix
 *  norm.
 *
 *******************************************************************************
 *
 * @param[in] norm
 *          - PlasmaMaxNorm: max norm
 *          - PlasmaOneNorm: one norm
 *          - PlasmaInfNorm: infinity norm
 *          - PlasmaFrobeniusNorm: Frobenius norm
 *
 * @param[in] m
 *          The number of rows of the matrix A. m >= 0. When m = 0,
 *          the returned value is set to zero.
 *
 * @param[in] n
 *          The number of columns of the matrix A. n >= 0. When n = 0,
 *          the returned value is set to zero.
 *
 * @param[in] pA
 *          The m-by-n matrix A.
 *
 * @param[in] lda
 *          The leading dimension of the array A. lda >= max(1,m).
 *
 *******************************************************************************
 *
 * @retval double
 *         The specified norm of the general matrix A.
 *
 *******************************************************************************
 *
 * @sa plasma_omp_zlange
 * @sa plasma_clange
 * @sa plasma_dlange
 * @sa plasma_slange
 *
 ******************************************************************************/
double plasma_zlange(plasma_enum_t norm,
                     int m, int n,
                     plasma_complex64_t *pA, int lda)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((norm != PlasmaMaxNorm) && (norm != PlasmaOneNorm) &&
        (norm != PlasmaInfNorm) && (norm != PlasmaFrobeniusNorm)) {
        plasma_error("illegal value of norm");
        return -1;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -2;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -3;
    }
    if (lda < imax(1, m)) {
        plasma_error("illegal value of lda");
        return -5;
    }

    // quick return
    if (imin(n, m) == 0)
        return 0.0;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_lange(plasma, PlasmaComplexDouble, m, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        m, n, 0, 0, m, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }

    // Allocate workspace.
    double *work = NULL;
    switch (norm) {
    case PlasmaMaxNorm:
        work = (double*)malloc((size_t)A.mt*A.nt*sizeof(double));
        break;
    case PlasmaOneNorm:
        work = (double*)malloc(((size_t)A.mt*A.n+A.n)*sizeof(double));
        break;
    case PlasmaInfNorm:
        work = (double*)malloc(((size_t)A.nt*A.m+A.m)*sizeof(double));
        break;
    case PlasmaFrobeniusNorm:
        work = (double*)malloc((size_t)2*A.mt*A.nt*sizeof(double));
        break;
    }
    if (work == NULL) {
        plasma_error("malloc() failed");
        return PlasmaErrorOutOfMemory;
    }

    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);

    double value;
    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zge2desc(pA, lda, A, &sequence, &request);

        // Call tile async function.
        plasma_omp_zlange(norm, A, work, &value, &sequence, &request);
    }
    // implicit synchronization

    free(work);

    // Free matrix in tile layout.
    plasma_desc_destroy(&A);

    // Return the norm.
    return value;
}

/***************************************************************************//**
 *
 * @ingroup plasma_lange
 *
 *  Calculates the max, one, infinity or Frobenius norm of a general matrix.
 *  Non-blocking equivalent of plasma_zlange(). May return before the
 *  computation is finished. Operates on matrices stored by tiles. All matrices
 *  are passed through descriptors. All dimensions are taken from the
 *  descriptors. Allows for pipelining of operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in] norm
 *          - PlasmaMaxNorm: Max norm
 *          - PlasmaOneNorm: One norm
 *          - PlasmaInfNorm: Infinity norm
 *          - PlasmaFrobeniusNorm: Frobenius norm
 *
 * @param[in] A
 *          The descriptor of matrix A.
 *
 * @param[out] work
 *          Workspace of size:
 *          - PlasmaMaxNorm: A.mt*A.nt
 *          - PlasmaOneNorm: A.mt*A.n + A.n
 *          - PlasmaInfNorm: A.nt*A.m + A.m
 *          - PlasmaFrobeniusNorm: 2*A.mt*A.nt
 *
 * @param[out] value
 *          The calculated value of the norm requested.
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes).
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values. The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_zlange
 * @sa plasma_omp_clange
 * @sa plasma_omp_dlange
 * @sa plasma_omp_slange
 *
 ******************************************************************************/
void plasma_omp_zlange(plasma_enum_t norm, plasma_desc_t A,
                       double *work, double *value,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((norm != PlasmaMaxNorm) && (norm != PlasmaOneNorm) &&
        (norm != PlasmaInfNorm) && (norm != PlasmaFrobeniusNorm)) {
        plasma_error("illegal value of norm");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid descriptor A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (imin(A.m, A.n) == 0) {
        *value = 0.0;
        return;
    }

    // Call the parallel function.
    plasma_pzlange(norm, A, work, value, sequence, request);
}
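A minimal caller sketch for plasma_zlange(), assuming the standard PLASMA entry points plasma_init()/plasma_finalize() and column-major input; illustrative only, not part of the library:

/* Illustrative use of plasma_zlange(); assumes plasma_init()/plasma_finalize()
   are available and that plasma_complex64_t is a double-complex type. */
#include <stdio.h>
#include "plasma.h"

int main(void) {
    plasma_init();
    int m = 2, n = 2, lda = 2;
    plasma_complex64_t A[4] = {1.0, 3.0, 2.0, 4.0};  /* columns of [[1,2],[3,4]] */
    double inf_norm = plasma_zlange(PlasmaInfNorm, m, n, A, lda);
    printf("||A||_inf = %f\n", inf_norm);  /* max row sum: 3 + 4 = 7 */
    plasma_finalize();
    return 0;
}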
guess2.c
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#define _DISP
//#define _LABEL
#define EXP 2

struct number {
    int num[4];
    int flag;
};

//double LABEL[13]={360,1440,1260,264,9,480,720,216,8,180,72,6,24};

struct number initarray[5040];

static inline void num2p(int num, int *p) {
    int i;
    for (i = 0; i < 4; i++)
        *(p++) = 0;
    i = 3;
    while (num) {
        *(--p) = num % 10;
        num = num / 10;
    }
}

static inline int check1(int *p) {
    int i, j;
    for (i = 0; i < 4; i++) {
        for (j = i + 1; j < 4; j++) {
            if (p[i] == p[j])
                return 0;
        }
    }
    return 1;
}

void PreInitArray() {
    int i, j;
    int cnt = 0;
    int numt[4];
    //struct number * arrayp=initarray;
    for (i = 123; i <= 9876; i++) {
        num2p(i, numt);
        if (check1(numt)) {
            initarray[cnt].flag = 1;
            for (j = 0; j < 4; j++) {
                initarray[cnt].num[j] = numt[j];
            }
            cnt++;
        }
    }
#ifdef _LABEL
    /* for(i=0;i<=4;i++){ for(j=0;j<=4-i;j++){
           LABEL[i*(11-i)/2+j]= pow((4*(i+j-1.6)*(i+j-1.6)+(i-0.4)*(i-0.4)),-1);
       } }
       for(i=0;i<13;i++) printf("%9d",i);
       printf("\n"); */
    for (i = 0; i < 13; i++) {
        LABEL[i] = pow(LABEL[i], 1.85);
        printf("%9f", LABEL[i]);
    }
#endif
    printf("\nPre Init Over!\n");
}

void InitArray(struct number *nump) {
    int i, j;
    for (i = 0; i < 5040; i++) {
        for (j = 0; j < 4; j++)
            nump[i].num[j] = initarray[i].num[j];
        nump[i].flag = 1;
    }
}

static inline void check2(int *num0, int *numg, int *a, int *b) {
    int i, j;
    *a = 0;
    *b = 0;
    for (i = 0; i < 4; i++) {
        if (num0[i] == numg[i])
            (*a)++;
        for (j = 0; j < 4; j++) {
            if (num0[i] == numg[j])
                (*b)++;
        }
    }
    (*b) -= (*a);
}

double Division(struct number *array, double cnt, int *nump) {
    int hist[15] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
    int i;
    //for(i=0;i<15;i++)
    //    hist[i]=0;
    int ta, tb;
    for (i = 0; i < 5040; i++) {
        if (array[i].flag) {
            check2(array[i].num, nump, &ta, &tb);
            hist[ta * (11 - ta) / 2 + tb]++;
        }
    }
    double div = 0;
    double temp;
    for (i = 0; i < 13; i++) {
        if (hist[i] != 0) {
            temp = pow(hist[i], EXP);
#ifdef _LABEL
            temp = LABEL[i] * temp * temp;
#endif
            div += temp;
        }
    }
    return div;
}

int BestDivision(struct number *array, int count) {
    double best = 10000 * 10000 + 0.0;
    int bestindex = -1;
    double new;
    int i;
    double cnt = 0.0;
    // direct
    /* for(i=0;i<5040;i++){ if(array[i].flag!=0) return i; }*/
    for (i = 0; i < 5040; i++)
        cnt += array[i].flag;
    // printf("remaining cnt:%f\n",cnt);
    if (cnt < 1.1) {
        for (i = 0; i < 5040; i++) {
            if (array[i].flag)
                return i;
        }
    }
    cnt = cnt / 13.0;
    for (i = 0; i < 5040; i++) {
        if (array[i].flag) {
            new = Division(array, cnt, array[i].num);
            if (best > new) {
                best = new;
                bestindex = i;
            }
            //if(best==13)
            //    break;
        }
        /*else{ new2=Division(array,cnt,array[i].num);
               if(best2>new2){ best2=new; bestindex2=i; } }*/
    }
    // printf("best min:%f\n",best);
    return bestindex;
}

int CCguess(int *num) {
    int numg[4];
    int cnt = 0;
    int i;
    int a, b, ta, tb;
    int ans;
    struct number array[5040];
    //printf("Begin Init!\n");
    InitArray(array);
    //printf("Init Over!\n");
    for (i = 0; i < 4; i++)
        numg[i] = i;
    while (1) {
        check2(num, numg, &a, &b);
        //printf("a:%d,b:%d\n",a,b);
        cnt++;
        if (a == 4 && b == 0)
            return cnt;
        if (cnt > 9)
            return 0;
        for (i = 0; i < 5040; i++) {
            if (array[i].flag) {
                check2(array[i].num, numg, &ta, &tb);
                array[i].flag = (ta == a && tb == b);
            }
        }
        // printf("best Error\n");
        ans = BestDivision(array, cnt);
        // printf("Error: ans:%dcnt:%d\n",ans,cnt);
        for (i = 0; i < 4; i++)
            numg[i] = array[ans].num[i];
    }
}

int main() {
    PreInitArray();
    int i, j, cnt = 0;
    int ans;
    int hist[11];
    for (i = 0; i < 11; i++)
        hist[i] = 0;
    /* j and ans are written by every iteration, so they must be private;
       the hist update is made atomic to avoid a data race. */
#pragma omp parallel for private(j, ans)
    for (i = 0; i < 5040; i++) {
        ans = CCguess(initarray[i].num);
#pragma omp atomic
        hist[ans]++;
        for (j = 0; j < 4; j++)
            printf("%d", initarray[i].num[j]);
        printf(",%d ", ans);
        if (ans == 0) {
            printf("\nError!\n");
            //break;
            exit(1);
        }
        if (i % 100 == 0)
            printf("%5d\n", i);
    }
    printf("time:");
    for (j = 1; j < 11; j++)
        printf("%5d", j);
    printf("\n ");
    for (j = 1; j < 11; j++) {
        cnt += hist[j] * j;
        printf("%5d", hist[j]);
    }
    printf("\naverage cnt:%12f\n", cnt / (5040 + 0.0));
    return 0;
}
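The histogram update in main() above uses an atomic increment; on OpenMP 4.5 and later the same accumulation can instead be expressed as an array reduction, which gives each thread a private copy of hist and combines them at the end. A minimal sketch with an illustrative stand-in for CCguess():

/* Minimal sketch: histogram accumulation via an OpenMP 4.5+ array
   reduction instead of per-update atomics. The binning is a stand-in. */
#include <stdio.h>

int main(void) {
    int hist[11] = {0};
#pragma omp parallel for reduction(+:hist)
    for (int i = 0; i < 5040; i++) {
        int bin = i % 11;  /* stand-in for the per-number guess count */
        hist[bin]++;       /* each thread updates its private copy */
    }
    for (int j = 0; j < 11; j++)
        printf("%d ", hist[j]);
    printf("\n");
    return 0;
}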
27_omp_softcounter.c
// clang-format off // RUN: %run %s --omp --call-filter 2>&1 | %filecheck %s --check-prefix=CHECK-TSAN // RUN: %run %s -o -O2 --omp --call-filter 2>&1 | %filecheck %s --check-prefix=CHECK-TSAN // RUN: %run %s -o -O2 --omp --call-filter 2>&1 | %filecheck %s // RUN: %run %s --omp --call-filter 2>&1 | %filecheck %s // REQUIRES: openmp && softcounter // clang-format on #include <stdlib.h> void ptr(const int n) { // Sections can sometimes cause Max. Heap Allocs to be 1 (instead of more likely 2), if // thread execution order always frees one pointer before malloc of other. #pragma omp parallel sections num_threads(2) { #pragma omp section for (int i = 1; i <= n; i++) { double* d = (double*)malloc(sizeof(double) * n); free(d); } #pragma omp section for (int i = 1; i <= n; i++) { double* e = (double*)malloc(2 * sizeof(double) * n); free(e); } } } int main(int argc, char** argv) { const int n = 100; ptr(n); // CHECK-TSAN-NOT: ThreadSanitizer // CHECK: [Trace] TypeART Runtime Trace // CHECK-NOT: [Error] // CHECK: Alloc Stats from softcounters // CHECK-NEXT: Total heap : 200 , 200 , - // CHECK-NEXT: Total stack : 0 , 0 , - // CHECK-NEXT: Total global : 0 , 0 , - // CHECK-NEXT: Max. Heap Allocs : {{[1-2]}} , - , - // CHECK-NEXT: Max. Stack Allocs : 0 , - , - // CHECK-NEXT: Addresses checked : 0 , - , - // CHECK-NEXT: Distinct Addresses checked : 0 , - , - // CHECK-NEXT: Addresses re-used : 0 , - , - // CHECK-NEXT: Addresses missed : 0 , - , - // CHECK-NEXT: Distinct Addresses missed : 0 , - , - // CHECK-NEXT: Total free heap : 200 , 200 , - // CHECK-NEXT: Total free stack : 0 , 0 , - // CHECK-NEXT: OMP Stack/Heap/Free : 0 , 200 , 200 // CHECK-NEXT: Null/Zero/NullZero Addr : 0 , 0 , 0 // CHECK-NEXT: User-def. types : 0 , - , - // CHECK-NEXT: Estimated memory use (KiB) : {{[0-9]+}} , - , - // CHECK-NEXT: Bytes per node map/stack : 96 , 8 , - // CHECK-NEXT: {{(#|-)+}} // CHECK-NEXT: Allocation type detail (heap, stack, global) // CHECK: {{(#|-)+}} // CHECK-NEXT: Free allocation type detail (heap, stack) // CHECK-NEXT: 6 : 200 , 0 , double // CHECK: Per-thread counter values (2 threads) // CHECK-NEXT: Thread Heap Allocs : 100 , 100 // CHECK-NEXT: Thread Heap Arrays : 100 , 100 // CHECK-NEXT: Thread Heap Allocs Free : 100 , 100 // CHECK-NEXT: Thread Heap Arrays Free : 100 , 100 // CHECK-NEXT: Thread Stack Allocs : 0 , 0 // CHECK-NEXT: Thread Stack Arrays : 0 , 0 // CHECK-NEXT: Thread Max. Stack Allocs : 0 , 0 // CHECK-NEXT: Thread Stack Allocs Free : 0 , 0 // CHECK-NEXT: Thread Stack Array Free : 0 , 0 return 0; }
taskloop_simd_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -triple x86_64-unknown-unknown -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp -triple x86_64-unknown-unknown -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -triple x86_64-unknown-unknown -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -triple x86_64-unknown-unknown -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized void xxx(int argc) { int x; // expected-note {{initialize the variable 'x' to silence this warning}} #pragma omp taskloop simd for (int i = 0; i < 10; ++i) argc = x; // expected-warning {{variable 'x' is uninitialized when used here}} } // expected-error@+1 {{unexpected OpenMP directive '#pragma omp taskloop simd'}} #pragma omp taskloop simd // expected-error@+1 {{unexpected OpenMP directive '#pragma omp taskloop simd'}} #pragma omp taskloop simd foo void test_no_clause() { int i; #pragma omp taskloop simd for (i = 0; i < 16; ++i) ; // expected-error@+2 {{statement after '#pragma omp taskloop simd' must be a for loop}} #pragma omp taskloop simd ++i; } void test_branch_protected_scope() { int i = 0; L1: ++i; int x[24]; #pragma omp parallel #pragma omp taskloop simd for (i = 0; i < 16; ++i) { if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L2; else if (i == 8) { L2: x[i]++; } } if (x[0] == 0) goto L2; // expected-error {{use of undeclared label 'L2'}} else if (x[1] == 1) goto L1; } void test_invalid_clause() { int i; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp taskloop simd' are ignored}} #pragma omp taskloop simd foo bar for (i = 0; i < 16; ++i) ; // expected-error@+1 {{directive '#pragma omp taskloop simd' cannot contain more than one 'nogroup' clause}} #pragma omp taskloop simd nogroup nogroup for (i = 0; i < 16; ++i) ; } void test_non_identifiers() { int i, x; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp taskloop simd' are ignored}} #pragma omp taskloop simd; for (i = 0; i < 16; ++i) ; // expected-warning@+2 {{extra tokens at the end of '#pragma omp taskloop simd' are ignored}} #pragma omp parallel #pragma omp taskloop simd linear(x); for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp taskloop simd' are ignored}} #pragma omp taskloop simd private(x); for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp taskloop simd' are ignored}} #pragma omp taskloop simd, private(x); for (i = 0; i < 16; ++i) ; } extern int foo(); void test_collapse() { int i; #pragma omp parallel // expected-error@+1 {{expected '('}} #pragma omp taskloop simd collapse for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp taskloop simd collapse( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp taskloop simd collapse() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp taskloop simd collapse(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // 
expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp taskloop simd collapse(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+2 {{extra tokens at the end of '#pragma omp taskloop simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp taskloop simd collapse 4) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp taskloop simd collapse(4 for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp taskloop simd collapse(4, for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp taskloop simd collapse(4, ) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}} #pragma omp parallel // expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp taskloop simd collapse(4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp taskloop simd collapse(4 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp taskloop simd collapse(4, , 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}} #pragma omp parallel #pragma omp taskloop simd collapse(4) for (int i1 = 0; i1 < 16; ++i1) for (int i2 = 0; i2 < 16; ++i2) for (int i3 = 0; i3 < 16; ++i3) for (int i4 = 0; i4 < 16; ++i4) foo(); #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp taskloop simd collapse(4, 8) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}} #pragma omp parallel // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp taskloop simd collapse(2.5) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp taskloop simd collapse(foo()) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp taskloop simd collapse(-5) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp taskloop simd collapse(0) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} 
#pragma omp taskloop simd collapse(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
}

void test_private() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp taskloop simd private(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp taskloop simd private(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp taskloop simd private(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp taskloop simd private()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp taskloop simd private(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp taskloop simd private(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
#pragma omp parallel
#pragma omp taskloop simd private(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp taskloop simd private(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp taskloop simd private(x, y, z)
  for (i = 0; i < 16; ++i) {
    x = y * i + z;
  }
}

void test_lastprivate() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp taskloop simd lastprivate(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp taskloop simd lastprivate(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp taskloop simd lastprivate(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp taskloop simd lastprivate()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp taskloop simd lastprivate(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp taskloop simd lastprivate(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
#pragma omp parallel
#pragma omp taskloop simd lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp taskloop simd lastprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp taskloop simd lastprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}

void test_firstprivate() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp taskloop simd firstprivate(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp taskloop simd firstprivate(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp taskloop simd firstprivate(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp taskloop simd firstprivate()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp taskloop simd firstprivate(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp taskloop simd firstprivate(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
#pragma omp parallel
#pragma omp taskloop simd lastprivate(x) firstprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp taskloop simd lastprivate(x, y) firstprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp taskloop simd lastprivate(x, y, z) firstprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp taskloop simd simdlen(64) safelen(8)
  for (i = 0; i < 16; ++i)
    ;
}

void test_loop_messages() {
  float a[100], b[100], c[100];
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp taskloop simd
  for (float fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp taskloop simd
  for (double fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
// expected-warning@+2 {{OpenMP loop iteration variable cannot have more than 64 bits size and will be narrowed}}
#pragma omp taskloop simd
  for (__int128 ii = 0; ii < 10; ii++) {
    c[ii] = a[ii] + b[ii];
  }
}

void test_nontemporal() {
  int i;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp taskloop simd'}} expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp taskloop simd nontemporal(
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp taskloop simd'}} expected-error@+1 2 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp taskloop simd nontemporal(,
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp taskloop simd'}} expected-error@+1 2 {{expected expression}}
#pragma omp taskloop simd nontemporal(, )
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp taskloop simd'}} expected-error@+1 {{expected expression}}
#pragma omp taskloop simd nontemporal()
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp taskloop simd'}} expected-error@+1 {{expected expression}}
#pragma omp taskloop simd nontemporal(int)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp taskloop simd'}} omp50-error@+1 {{expected variable name}}
#pragma omp taskloop simd nontemporal(0)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp taskloop simd'}} expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp taskloop simd nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp taskloop simd'}} expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp taskloop simd nontemporal(x, y)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp taskloop simd'}} expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp taskloop simd nontemporal(x, y, z)
  for (i = 0; i < 16; ++i)
    ;

  int x, y;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp taskloop simd'}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp taskloop simd nontemporal(x :)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp taskloop simd'}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}}
#pragma omp taskloop simd nontemporal(x :, )
  for (i = 0; i < 16; ++i)
    ;
// omp50-note@+2 {{defined as nontemporal}}
// omp45-error@+1 2 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp taskloop simd'}} omp50-error@+1 {{a variable cannot appear in more than one nontemporal clause}}
#pragma omp taskloop simd nontemporal(x) nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp taskloop simd'}}
#pragma omp taskloop simd private(x) nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp taskloop simd'}}
#pragma omp taskloop simd nontemporal(x) private(x)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp taskloop simd'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}}
#pragma omp taskloop simd nontemporal(x, y : 0)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp taskloop simd'}}
#pragma omp taskloop simd nontemporal(x) lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp taskloop simd'}}
#pragma omp taskloop simd lastprivate(x) nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp taskloop simd order // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp taskloop simd'}} expected-error {{expected '(' after 'order'}}
  for (int i = 0; i < 10; ++i)
    ;
#pragma omp taskloop simd order( // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp taskloop simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}}
  for (int i = 0; i < 10; ++i)
    ;
#pragma omp taskloop simd order(none // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp taskloop simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}}
  for (int i = 0; i < 10; ++i)
    ;
#pragma omp taskloop simd order(concurrent // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp taskloop simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}}
  for (int i = 0; i < 10; ++i)
    ;
#pragma omp taskloop simd order(concurrent) // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp taskloop simd'}}
  for (int i = 0; i < 10; ++i)
    ;
}
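/* A minimal well-formed counterpart to the malformed clauses exercised
   above -- a sketch, assuming a compiler with OpenMP 4.5 taskloop support
   (e.g. clang -fopenmp): every clause is complete, and simdlen(4) does not
   exceed safelen(8), so none of the diagnostics tested here fire.  */
#include <stdio.h>

int main(void) {
  int i, x = 1, y = 0, z;
  float a[16], b[16], c[16];
  for (i = 0; i < 16; ++i) {
    a[i] = (float)i;
    b[i] = 2.0f * i;
  }
#pragma omp parallel
#pragma omp single
#pragma omp taskloop simd firstprivate(x) private(z) lastprivate(y) \
    simdlen(4) safelen(8)
  for (i = 0; i < 16; ++i) {
    z = x + i;              /* private: defined before use in each iteration */
    c[i] = a[i] + b[i] + z;
    y = i;                  /* lastprivate: keeps the last iteration's value */
  }
  printf("y=%d c[15]=%g\n", y, (double)c[15]);
  return 0;
}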
omp_loop.h
// -*- C++ -*-

// Copyright (C) 2007-2019 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 3, or (at your option) any later
// version.

// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file parallel/omp_loop.h
 *  @brief Parallelization of embarrassingly parallel execution by
 *  means of an OpenMP for loop.
 *  This file is a GNU parallel extension to the Standard C++ Library.
 */

// Written by Felix Putze.

#ifndef _GLIBCXX_PARALLEL_OMP_LOOP_H
#define _GLIBCXX_PARALLEL_OMP_LOOP_H 1

#include <omp.h>

#include <parallel/settings.h>
#include <parallel/basic_iterator.h>
#include <parallel/base.h>

namespace __gnu_parallel
{
  /** @brief Embarrassingly parallel algorithm for random access
    *  iterators, using an OpenMP for loop.
    *
    *  @param __begin Begin iterator of element sequence.
    *  @param __end End iterator of element sequence.
    *  @param __o User-supplied functor (comparator, predicate, adding
    *  functor, etc.).
    *  @param __f Functor to @a process an element with __op (depends on
    *  desired functionality, e. g. for std::for_each(), ...).
    *  @param __r Functor to @a add a single __result to the already
    *  processed elements (depends on functionality).
    *  @param __base Base value for reduction.
    *  @param __output Pointer to position where final result is written to
    *  @param __bound Maximum number of elements processed (e. g. for
    *  std::count_n()).
    *  @return User-supplied functor (that may contain a part of the result).
    */
  template<typename _RAIter,
           typename _Op,
           typename _Fu,
           typename _Red,
           typename _Result>
    _Op
    __for_each_template_random_access_omp_loop(_RAIter __begin, _RAIter __end,
                                               _Op __o, _Fu& __f, _Red __r,
                                               _Result __base,
                                               _Result& __output,
      typename std::iterator_traits<_RAIter>::difference_type __bound)
    {
      typedef typename std::iterator_traits<_RAIter>::difference_type
        _DifferenceType;

      _DifferenceType __length = __end - __begin;
      _ThreadIndex __num_threads = __gnu_parallel::min<_DifferenceType>
        (__get_max_threads(), __length);

      _Result *__thread_results;

#     pragma omp parallel num_threads(__num_threads)
      {
#       pragma omp single
        {
          __num_threads = omp_get_num_threads();
          __thread_results = new _Result[__num_threads];

          for (_ThreadIndex __i = 0; __i < __num_threads; ++__i)
            __thread_results[__i] = _Result();
        }

        _ThreadIndex __iam = omp_get_thread_num();

#       pragma omp for schedule(dynamic, _Settings::get().workstealing_chunk_size)
        for (_DifferenceType __pos = 0; __pos < __length; ++__pos)
          __thread_results[__iam] = __r(__thread_results[__iam],
                                        __f(__o, __begin+__pos));
      } //parallel

      for (_ThreadIndex __i = 0; __i < __num_threads; ++__i)
        __output = __r(__output, __thread_results[__i]);

      delete [] __thread_results;

      // Points to last element processed (needed as return value for
      // some algorithms like transform).
      __f._M_finish_iterator = __begin + __length;

      return __o;
    }

} // end namespace

#endif /* _GLIBCXX_PARALLEL_OMP_LOOP_H */
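/* Stand-alone sketch of the pattern the header above implements: one
   partial result per thread, a dynamically scheduled OpenMP for loop,
   and a sequential reduction at the end.  Plain C stand-in, not part of
   libstdc++; sizes and names are illustrative.  */
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

int main(void) {
  enum { N = 1000000 };
  double *data = malloc(N * sizeof *data);
  double *partial = NULL;
  int nthreads = 0;
  long i;
  for (i = 0; i < N; ++i)
    data[i] = 1.0;
#pragma omp parallel
  {
#pragma omp single
    {
      /* Like the header: size the result array once the team exists;
         the implicit barrier of single publishes it to all threads.  */
      nthreads = omp_get_num_threads();
      partial = calloc(nthreads, sizeof *partial);
    }
    int iam = omp_get_thread_num();
#pragma omp for schedule(dynamic, 1024)
    for (i = 0; i < N; ++i)
      partial[iam] += data[i];   /* __r(__thread_results[__iam], __f(...)) */
  }
  /* Sequential reduction over the per-thread results, as in the header.  */
  double sum = 0.0;
  for (int t = 0; t < nthreads; ++t)
    sum += partial[t];
  printf("sum = %g\n", sum);
  free(partial);
  free(data);
  return 0;
}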
omp-par-scope.c
/* This testcase is part of GDB, the GNU debugger.

   Copyright 2017-2020 Free Software Foundation, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include <stdio.h>
#include <omp.h>

omp_lock_t lock;
omp_lock_t lock2;

/* Enforce execution order between two threads using a lock.  */

static void
omp_set_lock_in_order (int num, omp_lock_t *lock)
{
  /* Ensure that thread num 0 first sets the lock.  */
  if (num == 0)
    omp_set_lock (lock);
#pragma omp barrier

  /* Block thread num 1 until it can set the lock.  */
  if (num == 1)
    omp_set_lock (lock);

  /* This bit here is guaranteed to be executed first by thread num 0,
     and once thread num 0 unsets the lock, to be executed by thread
     num 1.  */
  ;
}

/* Testcase for checking access to variables in a single / outer scope.
   Make sure that variables not referred to in the parallel section are
   accessible from the debugger.  */

void
single_scope (void)
{
  static int s1 = -41, s2 = -42, s3 = -43;
  int i1 = 11, i2 = 12, i3 = 13;

#pragma omp parallel num_threads (2) shared (s1, i1) private (s2, i2)
  {
    int thread_num = omp_get_thread_num ();

    omp_set_lock_in_order (thread_num, &lock);
    s2 = 100 * (thread_num + 1) + 2;
    i2 = s2 + 10;

#pragma omp critical
    printf ("single_scope: thread_num=%d, s1=%d, i1=%d, s2=%d, i2=%d\n",
            thread_num, s1, i1, s2, i2);
    omp_unset_lock (&lock);
  }

  printf ("single_scope: s1=%d, s2=%d, s3=%d, i1=%d, i2=%d, i3=%d\n",
          s1, s2, s3, i1, i2, i3);
}

static int file_scope_var = 9876;

/* Testcase for checking access to variables from parallel region nested
   within more than one lexical scope.  Of particular interest are
   variables which are not referenced in the parallel section.  */

void
multi_scope (void)
{
  int i01 = 1, i02 = 2;
  {
    int i11 = 11, i12 = 12;
    {
      int i21 = -21, i22 = 22;

#pragma omp parallel num_threads (2) \
  firstprivate (i01) \
  shared (i11) \
  private (i21)
      {
        int thread_num = omp_get_thread_num ();

        omp_set_lock_in_order (thread_num, &lock);
        i21 = 100 * (thread_num + 1) + 21;

#pragma omp critical
        printf ("multi_scope: thread_num=%d, i01=%d, i11=%d, i21=%d\n",
                thread_num, i01, i11, i21);
        omp_unset_lock (&lock);
      }

      printf ("multi_scope: i01=%d, i02=%d, i11=%d, "
              "i12=%d, i21=%d, i22=%d\n",
              i01, i02, i11, i12, i21, i22);
    }
  }
}

/* Nested functions in C is a GNU extension.  Some non-GNU compilers
   define __GNUC__, but they don't support nested functions.  So,
   unfortunately, we can't use that for our test.  */
#if HAVE_NESTED_FUNCTION_SUPPORT

/* Testcase for checking access of variables from within parallel region
   in a lexically nested function.  */

void
nested_func (void)
{
  static int s1 = -42;
  int i = 1, j = 2, k = 3;

  void foo (int p, int q, int r)
  {
    int x = 4;
    {
      int y = 5, z = 6;
#pragma omp parallel num_threads (2) shared (i, p, x) private (j, q, y)
      {
        int tn = omp_get_thread_num ();

        omp_set_lock_in_order (tn, &lock);
        j = 1000 * (tn + 1);
        q = j + 1;
        y = q + 1;

#pragma omp critical
        printf ("nested_func: tn=%d: i=%d, p=%d, x=%d, j=%d, q=%d, y=%d\n",
                tn, i, p, x, j, q, y);
        omp_unset_lock (&lock);
      }
    }
  }

  foo (10, 11, 12);
  i = 101;
  j = 102;
  k = 103;
  foo (20, 21, 22);
}
#endif

/* Testcase for checking access to variables from within a nested
   parallel region.  */

void
nested_parallel (void)
{
  int i = 1, j = 2;
  int l = -1;

  omp_set_nested (1);
  omp_set_dynamic (0);
#pragma omp parallel num_threads (2) private (l)
  {
    int num = omp_get_thread_num ();

    omp_set_lock_in_order (num, &lock);
    int nthr = omp_get_num_threads ();
    int off = num * nthr;
    int k = off + 101;
    l = off + 102;
#pragma omp parallel num_threads (2) shared (num)
    {
      int inner_num = omp_get_thread_num ();

      omp_set_lock_in_order (inner_num, &lock2);
#pragma omp critical
      printf ("nested_parallel (inner threads): outer thread num = %d, "
              "thread num = %d\n", num, inner_num);
      omp_unset_lock (&lock2);
    }
#pragma omp critical
    printf ("nested_parallel (outer threads) %d: k = %d, l = %d\n",
            num, k, l);
    omp_unset_lock (&lock);
  }
}

int
main (int argc, char **argv)
{
  omp_init_lock (&lock);
  omp_init_lock (&lock2);
  single_scope ();
  multi_scope ();
#if HAVE_NESTED_FUNCTION_SUPPORT
  nested_func ();
#endif
  nested_parallel ();
  omp_destroy_lock (&lock);
  omp_destroy_lock (&lock2);
  return 0;
}
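/* Minimal form of the omp_set_lock_in_order idiom used above -- a sketch,
   not part of the GDB testcase: thread 0 takes the lock before the barrier,
   so thread 1 can only enter the ordered section after thread 0 releases
   it.  This guarantees the output order 0 then 1.  */
#include <omp.h>
#include <stdio.h>

int main(void) {
  omp_lock_t lock;
  omp_init_lock(&lock);
#pragma omp parallel num_threads(2)
  {
    int num = omp_get_thread_num();
    if (num == 0)
      omp_set_lock(&lock);     /* thread 0 wins before the barrier */
#pragma omp barrier
    if (num == 1)
      omp_set_lock(&lock);     /* thread 1 blocks until 0 unsets */
    printf("thread %d in ordered section\n", num);
    omp_unset_lock(&lock);
  }
  omp_destroy_lock(&lock);
  return 0;
}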
GB_binop__plus_fp64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):        GB (_AaddB__plus_fp64)
// A.*B function (eWiseMult):      GB (_AemultB_08__plus_fp64)
// A.*B function (eWiseMult):      GB (_AemultB_02__plus_fp64)
// A.*B function (eWiseMult):      GB (_AemultB_04__plus_fp64)
// A.*B function (eWiseMult):      GB (_AemultB_bitmap__plus_fp64)
// A*D function (colscale):        GB (_AxD__plus_fp64)
// D*A function (rowscale):        GB (_DxB__plus_fp64)
// C+=B function (dense accum):    GB (_Cdense_accumB__plus_fp64)
// C+=b function (dense accum):    GB (_Cdense_accumb__plus_fp64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_fp64)
// C=A+B function (dense ewise3):  GB (_Cdense_ewise3_noaccum__plus_fp64)
// C=scalar+B                      GB (_bind1st__plus_fp64)
// C=scalar+B'                     GB (_bind1st_tran__plus_fp64)
// C=A+scalar                      GB (_bind2nd__plus_fp64)
// C=A'+scalar                     GB (_bind2nd_tran__plus_fp64)

// C type:    double
// A type:    double
// A pattern? 0
// B type:    double
// B pattern? 0

// BinaryOp: cij = (aij + bij)

#define GB_ATYPE \
    double

#define GB_BTYPE \
    double

#define GB_CTYPE \
    double

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    double aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    double bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    double t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x + y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_PLUS || GxB_NO_FP64 || GxB_NO_PLUS_FP64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB (_Cdense_ewise3_accum__plus_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__plus_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__plus_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__plus_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__plus_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__plus_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__plus_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    double alpha_scalar ;
    double beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((double *) alpha_scalar_in)) ;
        beta_scalar  = (*((double *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__plus_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__plus_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__plus_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__plus_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__plus_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double   x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        double bij = GBX (Bx, p, false) ;
        Cx [p] = (x + bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__plus_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double   y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        double aij = GBX (Ax, p, false) ;
        Cx [p] = (aij + y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    double aij = GBX (Ax, pA, false) ;      \
    Cx [pC] = (x + aij) ;                   \
}

GrB_Info GB (_bind1st_tran__plus_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    double aij = GBX (Ax, pA, false) ;      \
    Cx [pC] = (aij + y) ;                   \
}

GrB_Info GB (_bind2nd_tran__plus_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
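/* Illustrative reduction of the bind1st kernel above to plain C: apply
   Cx[p] = x + Bx[p] wherever the bitmap marks an entry as present.  A
   sketch, not part of GraphBLAS; GBB/GBX are GraphBLAS macros, so the
   bitmap test is written out directly here and the data is made up.  */
#include <omp.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  enum { BNZ = 8 };
  double Bx[BNZ] = {1, 2, 3, 4, 5, 6, 7, 8};
  int8_t Bb[BNZ] = {1, 1, 0, 1, 0, 1, 1, 1};   /* 0 = entry absent */
  double Cx[BNZ] = {0};
  double x = 10.0;
  int64_t p;
#pragma omp parallel for schedule(static)
  for (p = 0; p < BNZ; p++) {
    if (!Bb[p]) continue;          /* skip entries absent from the bitmap */
    Cx[p] = x + Bx[p];
  }
  for (p = 0; p < BNZ; p++)
    printf("%g ", Cx[p]);
  printf("\n");
  return 0;
}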
pdzamax.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> s d c
 *
 **/

#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
#include <plasma_core_blas.h>

#define A(m, n) (plasma_complex64_t*)plasma_tile_addr(A, m, n)

/******************************************************************************/
void plasma_pdzamax(plasma_enum_t colrow, plasma_desc_t A,
                    double *work, double *values,
                    plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Return if failed sequence.
    if (sequence->status != PlasmaSuccess)
        return;

    switch (colrow) {
    //===================
    // PlasmaColumnwise
    //===================
    case PlasmaColumnwise:
        for (int m = 0; m < A.mt; m++) {
            int mvam = plasma_tile_mview(A, m);
            int ldam = plasma_tile_mmain(A, m);
            for (int n = 0; n < A.nt; n++) {
                int nvan = plasma_tile_nview(A, n);
                plasma_core_omp_dzamax(PlasmaColumnwise, mvam, nvan,
                                       A(m, n), ldam,
                                       &work[A.n*m+n*A.nb],
                                       sequence, request);
            }
        }
        #pragma omp taskwait
        plasma_core_omp_damax(PlasmaRowwise, A.n, A.mt,
                              work, A.n, values,
                              sequence, request);
        break;
    //================
    // PlasmaRowwise
    //================
    case PlasmaRowwise:
        for (int m = 0; m < A.mt; m++) {
            int mvam = plasma_tile_mview(A, m);
            int ldam = plasma_tile_mmain(A, m);
            for (int n = 0; n < A.nt; n++) {
                int nvan = plasma_tile_nview(A, n);
                plasma_core_omp_dzamax(PlasmaRowwise, mvam, nvan,
                                       A(m, n), ldam,
                                       &work[A.m*n+m*A.mb],
                                       sequence, request);
            }
        }
        #pragma omp taskwait
        plasma_core_omp_damax(PlasmaRowwise, A.m, A.nt,
                              work, A.m, values,
                              sequence, request);
    }
}
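/* Sketch of the same two-stage reduction in plain C with OpenMP tasks,
   independent of PLASMA: stage 1 writes a per-block maximum of |a[i]| into
   a work array (one task per block, like the per-tile dzamax calls), and
   stage 2 reduces the work array after the taskwait (like damax).  Sizes
   and data are illustrative.  */
#include <math.h>
#include <stdio.h>

int main(void) {
  enum { N = 8, BLOCKS = 4, BS = N / BLOCKS };
  double a[N] = {-1, 7, -9, 2, 5, -3, 8, -6};
  double work[BLOCKS];
#pragma omp parallel
#pragma omp single
  {
    for (int b = 0; b < BLOCKS; b++) {
#pragma omp task firstprivate(b) shared(a, work)
      {
        double m = 0.0;
        for (int i = b * BS; i < (b + 1) * BS; i++)
          if (fabs(a[i]) > m) m = fabs(a[i]);
        work[b] = m;                 /* stage 1: per-block amax */
      }
    }
#pragma omp taskwait                 /* like the taskwait before damax */
    double amax = 0.0;
    for (int b = 0; b < BLOCKS; b++)
      if (work[b] > amax) amax = work[b];
    printf("amax = %g\n", amax);     /* stage 2: reduce the work array */
  }
  return 0;
}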
omp.h
#ifndef PARLAY_INTERNAL_SCHEDULER_PLUGINS_OMP_H_
#define PARLAY_INTERNAL_SCHEDULER_PLUGINS_OMP_H_

#include <omp.h>

namespace parlay {  // IWYU pragma: private, include "../../parallel.h"

inline size_t num_workers() { return omp_get_max_threads(); }

inline size_t worker_id() { return omp_get_thread_num(); }

template <class F>
inline void parallel_for(size_t start, size_t end, F f, size_t, bool) {
  _Pragma("omp parallel for")
  for (size_t i = start; i < end; i++) f(i);
}

bool in_par_do = false;

template <typename Lf, typename Rf>
inline void par_do(Lf left, Rf right, bool) {
  if (!in_par_do) {
    in_par_do = true;  // at top level start up tasking
#pragma omp parallel
#pragma omp single
    {  // braces ensure a single thread creates both tasks and waits for them
#pragma omp task
      left();
#pragma omp task
      right();
#pragma omp taskwait
    }
    in_par_do = false;
  } else {  // already started
#pragma omp task
    left();
#pragma omp task
    right();
#pragma omp taskwait
  }
}

}  // namespace parlay

#endif  // PARLAY_INTERNAL_SCHEDULER_PLUGINS_OMP_H_
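/* The par_do pattern above, reduced to plain C: fork two pieces of work as
   OpenMP tasks and join with taskwait.  The parallel/single pair is needed
   only once, at the top level, which is what the in_par_do flag tracks; the
   function names here are illustrative.  */
#include <omp.h>
#include <stdio.h>

static void left_work(void)  { printf("left on thread %d\n",  omp_get_thread_num()); }
static void right_work(void) { printf("right on thread %d\n", omp_get_thread_num()); }

int main(void) {
#pragma omp parallel
#pragma omp single
  {
#pragma omp task
    left_work();
#pragma omp task
    right_work();
#pragma omp taskwait   /* join: both tasks complete before single ends */
  }
  return 0;
}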
image.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % IIIII M M AAA GGGG EEEEE % % I MM MM A A G E % % I M M M AAAAA G GG EEE % % I M M A A G G E % % IIIII M M A A GGGG EEEEE % % % % % % MagickCore Image Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/animate.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/compress.h" #include "MagickCore/constitute.h" #include "MagickCore/delegate.h" #include "MagickCore/display.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/histogram.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/magic.h" #include "MagickCore/magick.h" #include "MagickCore/magick-private.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/module.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/semaphore.h" #include "MagickCore/signature-private.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/timer.h" #include "MagickCore/timer-private.h" #include "MagickCore/token.h" #include "MagickCore/token-private.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #include "MagickCore/version.h" #include "MagickCore/xwindow-private.h" /* Constant declaration. 
*/ const char BackgroundColor[] = "#ffffff", /* white */ BorderColor[] = "#dfdfdf", /* gray */ DefaultTileFrame[] = "15x15+3+3", DefaultTileGeometry[] = "120x120+4+3>", DefaultTileLabel[] = "%f\n%G\n%b", ForegroundColor[] = "#000", /* black */ LoadImageTag[] = "Load/Image", LoadImagesTag[] = "Load/Images", MatteColor[] = "#bdbdbd", /* gray */ PSDensityGeometry[] = "72.0x72.0", PSPageGeometry[] = "612x792", SaveImageTag[] = "Save/Image", SaveImagesTag[] = "Save/Images", TransparentColor[] = "#00000000"; /* transparent black */ const double DefaultResolution = 72.0; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireImage() returns a pointer to an image structure initialized to % default values. % % The format of the AcquireImage method is: % % Image *AcquireImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: Many of the image default values are set from this % structure. For example, filename, compression, depth, background color, % and others. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AcquireImage(const ImageInfo *image_info, ExceptionInfo *exception) { const char *option; Image *image; MagickStatusType flags; /* Allocate image structure. */ (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); image=(Image *) AcquireCriticalMemory(sizeof(*image)); (void) memset(image,0,sizeof(*image)); /* Initialize Image structure. */ (void) CopyMagickString(image->magick,"MIFF",MagickPathExtent); image->storage_class=DirectClass; image->depth=MAGICKCORE_QUANTUM_DEPTH; image->colorspace=sRGBColorspace; image->rendering_intent=PerceptualIntent; image->gamma=1.000f/2.200f; image->chromaticity.red_primary.x=0.6400f; image->chromaticity.red_primary.y=0.3300f; image->chromaticity.red_primary.z=0.0300f; image->chromaticity.green_primary.x=0.3000f; image->chromaticity.green_primary.y=0.6000f; image->chromaticity.green_primary.z=0.1000f; image->chromaticity.blue_primary.x=0.1500f; image->chromaticity.blue_primary.y=0.0600f; image->chromaticity.blue_primary.z=0.7900f; image->chromaticity.white_point.x=0.3127f; image->chromaticity.white_point.y=0.3290f; image->chromaticity.white_point.z=0.3583f; image->interlace=NoInterlace; image->ticks_per_second=UndefinedTicksPerSecond; image->compose=OverCompositeOp; (void) QueryColorCompliance(MatteColor,AllCompliance,&image->matte_color, exception); (void) QueryColorCompliance(BackgroundColor,AllCompliance, &image->background_color,exception); (void) QueryColorCompliance(BorderColor,AllCompliance,&image->border_color, exception); (void) QueryColorCompliance(TransparentColor,AllCompliance, &image->transparent_color,exception); GetTimerInfo(&image->timer); image->cache=AcquirePixelCache(0); image->channel_mask=DefaultChannels; image->channel_map=AcquirePixelChannelMap(); image->blob=CloneBlobInfo((BlobInfo *) NULL); image->timestamp=GetMagickTime(); image->debug=IsEventLogging(); image->reference_count=1; image->semaphore=AcquireSemaphoreInfo(); image->signature=MagickCoreSignature; if (image_info == (ImageInfo *) NULL) return(image); /* Transfer image info. */ SetBlobExempt(image,image_info->file != (FILE *) NULL ? 
MagickTrue : MagickFalse); (void) CopyMagickString(image->filename,image_info->filename, MagickPathExtent); (void) CopyMagickString(image->magick_filename,image_info->filename, MagickPathExtent); (void) CopyMagickString(image->magick,image_info->magick,MagickPathExtent); if (image_info->size != (char *) NULL) { (void) ParseAbsoluteGeometry(image_info->size,&image->extract_info); image->columns=image->extract_info.width; image->rows=image->extract_info.height; image->offset=image->extract_info.x; image->extract_info.x=0; image->extract_info.y=0; } if (image_info->extract != (char *) NULL) { RectangleInfo geometry; (void) memset(&geometry,0,sizeof(geometry)); flags=ParseAbsoluteGeometry(image_info->extract,&geometry); if (((flags & XValue) != 0) || ((flags & YValue) != 0)) { image->extract_info=geometry; Swap(image->columns,image->extract_info.width); Swap(image->rows,image->extract_info.height); } } image->compression=image_info->compression; image->quality=image_info->quality; image->endian=image_info->endian; image->interlace=image_info->interlace; image->units=image_info->units; if (image_info->density != (char *) NULL) { GeometryInfo geometry_info; flags=ParseGeometry(image_info->density,&geometry_info); if ((flags & RhoValue) != 0) image->resolution.x=geometry_info.rho; image->resolution.y=image->resolution.x; if ((flags & SigmaValue) != 0) image->resolution.y=geometry_info.sigma; } if (image_info->page != (char *) NULL) { char *geometry; image->page=image->extract_info; geometry=GetPageGeometry(image_info->page); (void) ParseAbsoluteGeometry(geometry,&image->page); geometry=DestroyString(geometry); } if (image_info->depth != 0) image->depth=image_info->depth; image->dither=image_info->dither; image->matte_color=image_info->matte_color; image->background_color=image_info->background_color; image->border_color=image_info->border_color; image->transparent_color=image_info->transparent_color; image->ping=image_info->ping; image->progress_monitor=image_info->progress_monitor; image->client_data=image_info->client_data; if (image_info->cache != (void *) NULL) ClonePixelCacheMethods(image->cache,image_info->cache); /* Set all global options that map to per-image settings. */ (void) SyncImageSettings(image_info,image,exception); /* Global options that are only set for new images. */ option=GetImageOption(image_info,"delay"); if (option != (const char *) NULL) { GeometryInfo geometry_info; flags=ParseGeometry(option,&geometry_info); if ((flags & GreaterValue) != 0) { if (image->delay > (size_t) floor(geometry_info.rho+0.5)) image->delay=(size_t) floor(geometry_info.rho+0.5); } else if ((flags & LessValue) != 0) { if (image->delay < (size_t) floor(geometry_info.rho+0.5)) image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5); } else image->delay=(size_t) floor(geometry_info.rho+0.5); if ((flags & SigmaValue) != 0) image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5); } option=GetImageOption(image_info,"dispose"); if (option != (const char *) NULL) image->dispose=(DisposeType) ParseCommandOption(MagickDisposeOptions, MagickFalse,option); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireImageInfo() allocates the ImageInfo structure. 
% % The format of the AcquireImageInfo method is: % % ImageInfo *AcquireImageInfo(void) % */ MagickExport ImageInfo *AcquireImageInfo(void) { ImageInfo *image_info; image_info=(ImageInfo *) AcquireCriticalMemory(sizeof(*image_info)); GetImageInfo(image_info); return(image_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e N e x t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireNextImage() initializes the next image in a sequence to % default values. The next member of image points to the newly allocated % image. If there is a memory shortage, next is assigned NULL. % % The format of the AcquireNextImage method is: % % void AcquireNextImage(const ImageInfo *image_info,Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: Many of the image default values are set from this % structure. For example, filename, compression, depth, background color, % and others. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport void AcquireNextImage(const ImageInfo *image_info,Image *image, ExceptionInfo *exception) { /* Allocate image structure. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); image->next=AcquireImage(image_info,exception); if (GetNextImageInList(image) == (Image *) NULL) return; (void) CopyMagickString(GetNextImageInList(image)->filename,image->filename, MagickPathExtent); if (image_info != (ImageInfo *) NULL) (void) CopyMagickString(GetNextImageInList(image)->filename, image_info->filename,MagickPathExtent); DestroyBlob(GetNextImageInList(image)); image->next->blob=ReferenceBlob(image->blob); image->next->endian=image->endian; image->next->scene=image->scene+1; image->next->previous=image; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A p p e n d I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AppendImages() takes all images from the current image pointer to the end % of the image list and appends them to each other top-to-bottom if the % stack parameter is true, otherwise left-to-right. % % The current gravity setting effects how the image is justified in the % final image. % % The format of the AppendImages method is: % % Image *AppendImages(const Image *images,const MagickBooleanType stack, % ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o stack: A value other than 0 stacks the images top-to-bottom. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AppendImages(const Image *images, const MagickBooleanType stack,ExceptionInfo *exception) { #define AppendImageTag "Append/Image" CacheView *append_view; Image *append_image; MagickBooleanType homogeneous_colorspace, status; MagickOffsetType n; PixelTrait alpha_trait; RectangleInfo geometry; register const Image *next; size_t depth, height, number_images, width; ssize_t x_offset, y, y_offset; /* Compute maximum area of appended area. 
*/ assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); alpha_trait=images->alpha_trait; number_images=1; width=images->columns; height=images->rows; depth=images->depth; homogeneous_colorspace=MagickTrue; next=GetNextImageInList(images); for ( ; next != (Image *) NULL; next=GetNextImageInList(next)) { if (next->depth > depth) depth=next->depth; if (next->colorspace != images->colorspace) homogeneous_colorspace=MagickFalse; if (next->alpha_trait != UndefinedPixelTrait) alpha_trait=BlendPixelTrait; number_images++; if (stack != MagickFalse) { if (next->columns > width) width=next->columns; height+=next->rows; continue; } width+=next->columns; if (next->rows > height) height=next->rows; } /* Append images. */ append_image=CloneImage(images,width,height,MagickTrue,exception); if (append_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(append_image,DirectClass,exception) == MagickFalse) { append_image=DestroyImage(append_image); return((Image *) NULL); } if (homogeneous_colorspace == MagickFalse) (void) SetImageColorspace(append_image,sRGBColorspace,exception); append_image->depth=depth; append_image->alpha_trait=alpha_trait; append_image->page=images->page; (void) SetImageBackgroundColor(append_image,exception); status=MagickTrue; x_offset=0; y_offset=0; next=images; append_view=AcquireAuthenticCacheView(append_image,exception); for (n=0; n < (MagickOffsetType) number_images; n++) { CacheView *image_view; MagickBooleanType proceed; SetGeometry(append_image,&geometry); GravityAdjustGeometry(next->columns,next->rows,next->gravity,&geometry); if (stack != MagickFalse) x_offset-=geometry.x; else y_offset-=geometry.y; image_view=AcquireVirtualCacheView(next,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(next,next,next->rows,1) #endif for (y=0; y < (ssize_t) next->rows; y++) { MagickBooleanType sync; PixelInfo pixel; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); q=QueueCacheViewAuthenticPixels(append_view,x_offset,y+y_offset, next->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } GetPixelInfo(next,&pixel); for (x=0; x < (ssize_t) next->columns; x++) { GetPixelInfoPixel(next,p,&pixel); SetPixelViaPixelInfo(append_image,&pixel,q); p+=GetPixelChannels(next); q+=GetPixelChannels(append_image); } sync=SyncCacheViewAuthenticPixels(append_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (stack == MagickFalse) { x_offset+=(ssize_t) next->columns; y_offset=0; } else { x_offset=0; y_offset+=(ssize_t) next->rows; } proceed=SetImageProgress(append_image,AppendImageTag,n,number_images); if (proceed == MagickFalse) break; next=GetNextImageInList(next); } append_view=DestroyCacheView(append_view); if (status == MagickFalse) append_image=DestroyImage(append_image); return(append_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C a t c h I m a g e E x c e p t i o n % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CatchImageException() returns if no exceptions are found in the image % sequence, otherwise it determines the most severe exception and reports % it as a warning or error depending on the severity. % % The format of the CatchImageException method is: % % ExceptionType CatchImageException(Image *image) % % A description of each parameter follows: % % o image: An image sequence. % */ MagickExport ExceptionType CatchImageException(Image *image) { ExceptionInfo *exception; ExceptionType severity; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); exception=AcquireExceptionInfo(); CatchException(exception); severity=exception->severity; exception=DestroyExceptionInfo(exception); return(severity); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l i p I m a g e P a t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClipImagePath() sets the image clip mask based any clipping path information % if it exists. % % The format of the ClipImagePath method is: % % MagickBooleanType ClipImagePath(Image *image,const char *pathname, % const MagickBooleanType inside,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o pathname: name of clipping path resource. If name is preceded by #, use % clipping path numbered by name. % % o inside: if non-zero, later operations take effect inside clipping path. % Otherwise later operations take effect outside clipping path. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType ClipImage(Image *image,ExceptionInfo *exception) { return(ClipImagePath(image,"#1",MagickTrue,exception)); } MagickExport MagickBooleanType ClipImagePath(Image *image,const char *pathname, const MagickBooleanType inside,ExceptionInfo *exception) { #define ClipImagePathTag "ClipPath/Image" char *property; const char *value; Image *clip_mask; ImageInfo *image_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(pathname != NULL); property=AcquireString(pathname); (void) FormatLocaleString(property,MagickPathExtent,"8BIM:1999,2998:%s", pathname); value=GetImageProperty(image,property,exception); property=DestroyString(property); if (value == (const char *) NULL) { ThrowFileException(exception,OptionError,"NoClipPathDefined", image->filename); return(MagickFalse); } image_info=AcquireImageInfo(); (void) CopyMagickString(image_info->filename,image->filename, MagickPathExtent); (void) ConcatenateMagickString(image_info->filename,pathname, MagickPathExtent); clip_mask=BlobToImage(image_info,value,strlen(value),exception); image_info=DestroyImageInfo(image_info); if (clip_mask == (Image *) NULL) return(MagickFalse); if (clip_mask->storage_class == PseudoClass) { (void) SyncImage(clip_mask,exception); if (SetImageStorageClass(clip_mask,DirectClass,exception) == MagickFalse) return(MagickFalse); } if (inside == MagickFalse) (void) NegateImage(clip_mask,MagickFalse,exception); (void) FormatLocaleString(clip_mask->magick_filename,MagickPathExtent, "8BIM:1999,2998:%s\nPS",pathname); (void) SetImageMask(image,WritePixelMask,clip_mask,exception); clip_mask=DestroyImage(clip_mask); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImage() copies an image and returns the copy as a new image object. % % If the specified columns and rows is 0, an exact copy of the image is % returned, otherwise the pixel data is undefined and must be initialized % with the QueueAuthenticPixels() and SyncAuthenticPixels() methods. On % failure, a NULL image is returned and exception describes the reason for the % failure. % % The format of the CloneImage method is: % % Image *CloneImage(const Image *image,const size_t columns, % const size_t rows,const MagickBooleanType orphan, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the cloned image. % % o rows: the number of rows in the cloned image. % % o detach: With a value other than 0, the cloned image is detached from % its parent I/O stream. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *CloneImage(const Image *image,const size_t columns, const size_t rows,const MagickBooleanType detach,ExceptionInfo *exception) { Image *clone_image; double scale; size_t length; /* Clone the image. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((image->columns == 0) || (image->rows == 0)) { (void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError, "NegativeOrZeroImageSize","`%s'",image->filename); return((Image *) NULL); } clone_image=(Image *) AcquireCriticalMemory(sizeof(*clone_image)); (void) memset(clone_image,0,sizeof(*clone_image)); clone_image->signature=MagickCoreSignature; clone_image->storage_class=image->storage_class; clone_image->number_channels=image->number_channels; clone_image->number_meta_channels=image->number_meta_channels; clone_image->metacontent_extent=image->metacontent_extent; clone_image->colorspace=image->colorspace; clone_image->alpha_trait=image->alpha_trait; clone_image->channels=image->channels; clone_image->mask_trait=image->mask_trait; clone_image->columns=image->columns; clone_image->rows=image->rows; clone_image->dither=image->dither; clone_image->image_info=CloneImageInfo(image->image_info); (void) CloneImageProfiles(clone_image,image); (void) CloneImageProperties(clone_image,image); (void) CloneImageArtifacts(clone_image,image); GetTimerInfo(&clone_image->timer); if (image->ascii85 != (void *) NULL) Ascii85Initialize(clone_image); clone_image->extent=image->extent; clone_image->magick_columns=image->magick_columns; clone_image->magick_rows=image->magick_rows; clone_image->type=image->type; clone_image->channel_mask=image->channel_mask; clone_image->channel_map=ClonePixelChannelMap(image->channel_map); (void) CopyMagickString(clone_image->magick_filename,image->magick_filename, MagickPathExtent); (void) CopyMagickString(clone_image->magick,image->magick,MagickPathExtent); (void) CopyMagickString(clone_image->filename,image->filename, MagickPathExtent); clone_image->progress_monitor=image->progress_monitor; clone_image->client_data=image->client_data; clone_image->reference_count=1; clone_image->next=image->next; clone_image->previous=image->previous; clone_image->list=NewImageList(); if (detach == MagickFalse) clone_image->blob=ReferenceBlob(image->blob); else { clone_image->next=NewImageList(); clone_image->previous=NewImageList(); clone_image->blob=CloneBlobInfo((BlobInfo *) NULL); } clone_image->ping=image->ping; clone_image->debug=IsEventLogging(); clone_image->semaphore=AcquireSemaphoreInfo(); if (image->colormap != (PixelInfo *) NULL) { /* Allocate and copy the image colormap. 
*/ clone_image->colors=image->colors; length=(size_t) image->colors; clone_image->colormap=(PixelInfo *) AcquireQuantumMemory(length+1, sizeof(*clone_image->colormap)); if (clone_image->colormap == (PixelInfo *) NULL) { clone_image=DestroyImage(clone_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } (void) memcpy(clone_image->colormap,image->colormap,length* sizeof(*clone_image->colormap)); } if ((columns == 0) || (rows == 0)) { if (image->montage != (char *) NULL) (void) CloneString(&clone_image->montage,image->montage); if (image->directory != (char *) NULL) (void) CloneString(&clone_image->directory,image->directory); clone_image->cache=ReferencePixelCache(image->cache); return(clone_image); } scale=1.0; if (image->columns != 0) scale=(double) columns/(double) image->columns; clone_image->page.width=(size_t) floor(scale*image->page.width+0.5); clone_image->page.x=(ssize_t) ceil(scale*image->page.x-0.5); clone_image->tile_offset.x=(ssize_t) ceil(scale*image->tile_offset.x-0.5); scale=1.0; if (image->rows != 0) scale=(double) rows/(double) image->rows; clone_image->page.height=(size_t) floor(scale*image->page.height+0.5); clone_image->page.y=(ssize_t) ceil(scale*image->page.y-0.5); clone_image->tile_offset.y=(ssize_t) ceil(scale*image->tile_offset.y-0.5); clone_image->cache=ClonePixelCache(image->cache); if (SetImageExtent(clone_image,columns,rows,exception) == MagickFalse) clone_image=DestroyImage(clone_image); return(clone_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImageInfo() makes a copy of the given image info structure. If % NULL is specified, a new image info structure is created initialized to % default values. % % The format of the CloneImageInfo method is: % % ImageInfo *CloneImageInfo(const ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. 
% */ MagickExport ImageInfo *CloneImageInfo(const ImageInfo *image_info) { ImageInfo *clone_info; clone_info=AcquireImageInfo(); if (image_info == (ImageInfo *) NULL) return(clone_info); clone_info->compression=image_info->compression; clone_info->temporary=image_info->temporary; clone_info->adjoin=image_info->adjoin; clone_info->antialias=image_info->antialias; clone_info->scene=image_info->scene; clone_info->number_scenes=image_info->number_scenes; clone_info->depth=image_info->depth; if (image_info->size != (char *) NULL) (void) CloneString(&clone_info->size,image_info->size); if (image_info->extract != (char *) NULL) (void) CloneString(&clone_info->extract,image_info->extract); if (image_info->scenes != (char *) NULL) (void) CloneString(&clone_info->scenes,image_info->scenes); if (image_info->page != (char *) NULL) (void) CloneString(&clone_info->page,image_info->page); clone_info->interlace=image_info->interlace; clone_info->endian=image_info->endian; clone_info->units=image_info->units; clone_info->quality=image_info->quality; if (image_info->sampling_factor != (char *) NULL) (void) CloneString(&clone_info->sampling_factor, image_info->sampling_factor); if (image_info->server_name != (char *) NULL) (void) CloneString(&clone_info->server_name,image_info->server_name); if (image_info->font != (char *) NULL) (void) CloneString(&clone_info->font,image_info->font); if (image_info->texture != (char *) NULL) (void) CloneString(&clone_info->texture,image_info->texture); if (image_info->density != (char *) NULL) (void) CloneString(&clone_info->density,image_info->density); clone_info->pointsize=image_info->pointsize; clone_info->fuzz=image_info->fuzz; clone_info->matte_color=image_info->matte_color; clone_info->background_color=image_info->background_color; clone_info->border_color=image_info->border_color; clone_info->transparent_color=image_info->transparent_color; clone_info->dither=image_info->dither; clone_info->monochrome=image_info->monochrome; clone_info->colorspace=image_info->colorspace; clone_info->type=image_info->type; clone_info->orientation=image_info->orientation; clone_info->ping=image_info->ping; clone_info->verbose=image_info->verbose; clone_info->progress_monitor=image_info->progress_monitor; clone_info->client_data=image_info->client_data; clone_info->cache=image_info->cache; if (image_info->cache != (void *) NULL) clone_info->cache=ReferencePixelCache(image_info->cache); if (image_info->profile != (void *) NULL) clone_info->profile=(void *) CloneStringInfo((StringInfo *) image_info->profile); SetImageInfoFile(clone_info,image_info->file); SetImageInfoBlob(clone_info,image_info->blob,image_info->length); clone_info->stream=image_info->stream; clone_info->custom_stream=image_info->custom_stream; (void) CopyMagickString(clone_info->magick,image_info->magick, MagickPathExtent); (void) CopyMagickString(clone_info->unique,image_info->unique, MagickPathExtent); (void) CopyMagickString(clone_info->filename,image_info->filename, MagickPathExtent); clone_info->channel=image_info->channel; (void) CloneImageOptions(clone_info,image_info); clone_info->debug=IsEventLogging(); clone_info->signature=image_info->signature; return(clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o p y I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CopyImagePixels() copies pixels from the source image as defined by the % geometry the destination image at the specified 
offset. % % The format of the CopyImagePixels method is: % % MagickBooleanType CopyImagePixels(Image *image,const Image *source_image, % const RectangleInfo *geometry,const OffsetInfo *offset, % ExceptionInfo *exception); % % A description of each parameter follows: % % o image: the destination image. % % o source_image: the source image. % % o geometry: define the dimensions of the source pixel rectangle. % % o offset: define the offset in the destination image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType CopyImagePixels(Image *image, const Image *source_image,const RectangleInfo *geometry, const OffsetInfo *offset,ExceptionInfo *exception) { #define CopyImageTag "Copy/Image" CacheView *image_view, *source_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(source_image != (Image *) NULL); assert(geometry != (RectangleInfo *) NULL); assert(offset != (OffsetInfo *) NULL); if ((offset->x < 0) || (offset->y < 0) || ((ssize_t) (offset->x+geometry->width) > (ssize_t) image->columns) || ((ssize_t) (offset->y+geometry->height) > (ssize_t) image->rows)) ThrowBinaryException(OptionError,"GeometryDoesNotContainImage", image->filename); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); /* Copy image pixels. */ status=MagickTrue; progress=0; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,source_image,geometry->height,1) #endif for (y=0; y < (ssize_t) geometry->height; y++) { MagickBooleanType sync; register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,geometry->x,y+geometry->y, geometry->width,1,exception); q=QueueCacheViewAuthenticPixels(image_view,offset->x,y+offset->y, geometry->width,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) geometry->width; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image,channel); if ((traits == UndefinedPixelTrait) || ((traits & UpdatePixelTrait) == 0) || (source_traits == UndefinedPixelTrait)) continue; SetPixelChannel(image,channel,p[i],q); } p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,CopyImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImage() 
dereferences an image, deallocating memory associated with % the image if the reference count becomes zero. % % The format of the DestroyImage method is: % % Image *DestroyImage(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport Image *DestroyImage(Image *image) { MagickBooleanType destroy; /* Dereference image. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); destroy=MagickFalse; LockSemaphoreInfo(image->semaphore); image->reference_count--; if (image->reference_count == 0) destroy=MagickTrue; UnlockSemaphoreInfo(image->semaphore); if (destroy == MagickFalse) return((Image *) NULL); /* Destroy image. */ DestroyImagePixels(image); image->channel_map=DestroyPixelChannelMap(image->channel_map); if (image->montage != (char *) NULL) image->montage=DestroyString(image->montage); if (image->directory != (char *) NULL) image->directory=DestroyString(image->directory); if (image->colormap != (PixelInfo *) NULL) image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap); if (image->geometry != (char *) NULL) image->geometry=DestroyString(image->geometry); DestroyImageProfiles(image); DestroyImageProperties(image); DestroyImageArtifacts(image); if (image->ascii85 != (Ascii85Info *) NULL) image->ascii85=(Ascii85Info *) RelinquishMagickMemory(image->ascii85); if (image->image_info != (ImageInfo *) NULL) image->image_info=DestroyImageInfo(image->image_info); DestroyBlob(image); if (image->semaphore != (SemaphoreInfo *) NULL) RelinquishSemaphoreInfo(&image->semaphore); image->signature=(~MagickCoreSignature); image=(Image *) RelinquishMagickMemory(image); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImageInfo() deallocates memory associated with an ImageInfo % structure. % % The format of the DestroyImageInfo method is: % % ImageInfo *DestroyImageInfo(ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. 
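%
%  DestroyImageInfo() returns NULL, so the handle can be cleared in the same
%  statement, e.g.:
%
%      image_info=DestroyImageInfo(image_info);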
% */ MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); if (image_info->size != (char *) NULL) image_info->size=DestroyString(image_info->size); if (image_info->extract != (char *) NULL) image_info->extract=DestroyString(image_info->extract); if (image_info->scenes != (char *) NULL) image_info->scenes=DestroyString(image_info->scenes); if (image_info->page != (char *) NULL) image_info->page=DestroyString(image_info->page); if (image_info->sampling_factor != (char *) NULL) image_info->sampling_factor=DestroyString( image_info->sampling_factor); if (image_info->server_name != (char *) NULL) image_info->server_name=DestroyString( image_info->server_name); if (image_info->font != (char *) NULL) image_info->font=DestroyString(image_info->font); if (image_info->texture != (char *) NULL) image_info->texture=DestroyString(image_info->texture); if (image_info->density != (char *) NULL) image_info->density=DestroyString(image_info->density); if (image_info->cache != (void *) NULL) image_info->cache=DestroyPixelCache(image_info->cache); if (image_info->profile != (StringInfo *) NULL) image_info->profile=(void *) DestroyStringInfo((StringInfo *) image_info->profile); DestroyImageOptions(image_info); image_info->signature=(~MagickCoreSignature); image_info=(ImageInfo *) RelinquishMagickMemory(image_info); return(image_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D i s a s s o c i a t e I m a g e S t r e a m % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DisassociateImageStream() disassociates the image stream. It checks if the % blob of the specified image is referenced by other images. If the reference % count is higher than 1, a new blob is assigned to the specified image. % % The format of the DisassociateImageStream method is: % % void DisassociateImageStream(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport void DisassociateImageStream(Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); DisassociateBlob(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageInfo() initializes image_info to default values. % % The format of the GetImageInfo method is: % % void GetImageInfo(ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. % */ MagickExport void GetImageInfo(ImageInfo *image_info) { char *synchronize; ExceptionInfo *exception; /* File and image dimension members.
*/ (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image_info != (ImageInfo *) NULL); (void) memset(image_info,0,sizeof(*image_info)); image_info->adjoin=MagickTrue; image_info->interlace=NoInterlace; image_info->channel=DefaultChannels; image_info->quality=UndefinedCompressionQuality; image_info->antialias=MagickTrue; image_info->dither=MagickTrue; synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE"); if (synchronize != (const char *) NULL) { image_info->synchronize=IsStringTrue(synchronize); synchronize=DestroyString(synchronize); } exception=AcquireExceptionInfo(); (void) QueryColorCompliance(BackgroundColor,AllCompliance, &image_info->background_color,exception); (void) QueryColorCompliance(BorderColor,AllCompliance, &image_info->border_color,exception); (void) QueryColorCompliance(MatteColor,AllCompliance,&image_info->matte_color, exception); (void) QueryColorCompliance(TransparentColor,AllCompliance, &image_info->transparent_color,exception); exception=DestroyExceptionInfo(exception); image_info->debug=IsEventLogging(); image_info->signature=MagickCoreSignature; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e I n f o F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageInfoFile() returns the image info file member. % % The format of the GetImageInfoFile method is: % % FILE *GetImageInfoFile(const ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. % */ MagickExport FILE *GetImageInfoFile(const ImageInfo *image_info) { return(image_info->file); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageMask() returns the mask associated with the image. % % The format of the GetImageMask method is: % % Image *GetImageMask(const Image *image,const PixelMask type, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o type: the mask type, ReadPixelMask or WritePixelMask. % */ MagickExport Image *GetImageMask(const Image *image,const PixelMask type, ExceptionInfo *exception) { CacheView *mask_view, *image_view; Image *mask_image; MagickBooleanType status; ssize_t y; /* Get image mask. 
*/ assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); switch (type) { case ReadPixelMask: { if ((image->channels & ReadMaskChannel) == 0) return((Image *) NULL); break; } case WritePixelMask: { if ((image->channels & WriteMaskChannel) == 0) return((Image *) NULL); break; } default: { if ((image->channels & CompositeMaskChannel) == 0) return((Image *) NULL); break; } } mask_image=AcquireImage((ImageInfo *) NULL,exception); status=SetImageExtent(mask_image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImage(mask_image)); status=MagickTrue; mask_image->alpha_trait=UndefinedPixelTrait; (void) SetImageColorspace(mask_image,GRAYColorspace,exception); image_view=AcquireVirtualCacheView(image,exception); mask_view=AcquireAuthenticCacheView(mask_image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewAuthenticPixels(mask_view,0,y,mask_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { switch (type) { case ReadPixelMask: { SetPixelGray(mask_image,GetPixelReadMask(image,p),q); break; } case WritePixelMask: { SetPixelGray(mask_image,GetPixelWriteMask(image,p),q); break; } default: { SetPixelGray(mask_image,GetPixelCompositeMask(image,p),q); break; } } p+=GetPixelChannels(image); q+=GetPixelChannels(mask_image); } if (SyncCacheViewAuthenticPixels(mask_view,exception) == MagickFalse) status=MagickFalse; } mask_view=DestroyCacheView(mask_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) mask_image=DestroyImage(mask_image); return(mask_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e R e f e r e n c e C o u n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageReferenceCount() returns the image reference count. % % The format of the GetImageReferenceCount method is: % % ssize_t GetImageReferenceCount(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport ssize_t GetImageReferenceCount(Image *image) { ssize_t reference_count; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); LockSemaphoreInfo(image->semaphore); reference_count=image->reference_count; UnlockSemaphoreInfo(image->semaphore); return(reference_count); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i r t u a l P i x e l M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageVirtualPixelMethod() gets the "virtual pixels" method for the % image. A virtual pixel is any pixel access that is outside the boundaries % of the image cache. % % The format of the GetImageVirtualPixelMethod() method is: % % VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image) % % A description of each parameter follows: % % o image: the image.
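%
%  A sketch of the common save-and-restore idiom around an operation that
%  needs a particular edge policy (names are illustrative):
%
%      VirtualPixelMethod method = GetImageVirtualPixelMethod(image);
%      (void) SetImageVirtualPixelMethod(image,EdgeVirtualPixelMethod,
%        exception);
%      ...
%      (void) SetImageVirtualPixelMethod(image,method,exception);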
% */ MagickExport VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); return(GetPixelCacheVirtualMethod(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n t e r p r e t I m a g e F i l e n a m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InterpretImageFilename() interprets embedded characters in an image filename. % The filename length is returned. % % The format of the InterpretImageFilename method is: % % size_t InterpretImageFilename(const ImageInfo *image_info,Image *image, % const char *format,int value,char *filename,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o image: the image. % % o format: A filename describing the format to use to write the numeric % argument. Only the first numeric format identifier is replaced. % % o value: Numeric value to substitute into format filename. % % o filename: return the formatted filename in this character buffer. % % o exception: return any errors or warnings in this structure. % */ MagickExport size_t InterpretImageFilename(const ImageInfo *image_info, Image *image,const char *format,int value,char *filename, ExceptionInfo *exception) { char *q; int c; MagickBooleanType canonical; register const char *p; ssize_t field_width, offset; canonical=MagickFalse; offset=0; (void) CopyMagickString(filename,format,MagickPathExtent); for (p=strchr(format,'%'); p != (char *) NULL; p=strchr(p+1,'%')) { q=(char *) p+1; if (*q == '%') { p=q+1; continue; } field_width=0; if (*q == '0') field_width=(ssize_t) strtol(q,&q,10); switch (*q) { case 'd': case 'o': case 'x': { q++; c=(*q); *q='\0'; (void) FormatLocaleString(filename+(p-format-offset),(size_t) (MagickPathExtent-(p-format-offset)),p,value); offset+=(4-field_width); *q=c; (void) ConcatenateMagickString(filename,q,MagickPathExtent); canonical=MagickTrue; if (*(q-1) != '%') break; p++; break; } case '[': { char pattern[MagickPathExtent]; const char *option; register char *r; register ssize_t i; ssize_t depth; /* Image option.
*/ if (strchr(p,']') == (char *) NULL) break; depth=1; r=q+1; for (i=0; (i < (MagickPathExtent-1L)) && (*r != '\0'); i++) { if (*r == '[') depth++; if (*r == ']') depth--; if (depth <= 0) break; pattern[i]=(*r++); } pattern[i]='\0'; if (LocaleNCompare(pattern,"filename:",9) != 0) break; option=(const char *) NULL; if (image != (Image *) NULL) option=GetImageProperty(image,pattern,exception); if ((option == (const char *) NULL) && (image != (Image *) NULL)) option=GetImageArtifact(image,pattern); if ((option == (const char *) NULL) && (image_info != (ImageInfo *) NULL)) option=GetImageOption(image_info,pattern); if (option == (const char *) NULL) break; q--; c=(*q); *q='\0'; (void) CopyMagickString(filename+(p-format-offset),option,(size_t) (MagickPathExtent-(p-format-offset))); offset+=strlen(pattern)-strlen(option)+3; *q=c; (void) ConcatenateMagickString(filename,r+1,MagickPathExtent); canonical=MagickTrue; if (*(q-1) != '%') break; p++; break; } default: break; } } if (canonical == MagickFalse) (void) CopyMagickString(filename,format,MagickPathExtent); else for (q=filename; *q != '\0'; q++) if ((*q == '%') && (*(q+1) == '%')) (void) CopyMagickString(q,q+1,(size_t) (MagickPathExtent-(q-filename))); return(strlen(filename)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s H i g h D y n a m i c R a n g e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsHighDynamicRangeImage() returns MagickTrue if any pixel component is % non-integer or exceeds the bounds of the quantum depth (e.g. for Q16, % 0..65535). % % The format of the IsHighDynamicRangeImage method is: % % MagickBooleanType IsHighDynamicRangeImage(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType IsHighDynamicRangeImage(const Image *image, ExceptionInfo *exception) { #if !defined(MAGICKCORE_HDRI_SUPPORT) (void) image; (void) exception; return(MagickFalse); #else CacheView *image_view; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double pixel; PixelTrait traits; traits=GetPixelChannelTraits(image,(PixelChannel) i); if (traits == UndefinedPixelTrait) continue; pixel=(double) p[i]; if ((pixel < 0.0) || (pixel > QuantumRange) || (pixel != (double) ((QuantumAny) pixel))) break; } p+=GetPixelChannels(image); if (i < (ssize_t) GetPixelChannels(image)) break; } if (x < (ssize_t) image->columns) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status != MagickFalse ?
MagickFalse : MagickTrue); #endif } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e O b j e c t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImageObject() returns MagickTrue if the image sequence contains a valid % set of image objects. % % The format of the IsImageObject method is: % % MagickBooleanType IsImageObject(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickBooleanType IsImageObject(const Image *image) { register const Image *p; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); for (p=image; p != (Image *) NULL; p=GetNextImageInList(p)) if (p->signature != MagickCoreSignature) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s T a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsTaintImage() returns MagickTrue if any pixel in the image has been altered % since it was first constituted. % % The format of the IsTaintImage method is: % % MagickBooleanType IsTaintImage(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickBooleanType IsTaintImage(const Image *image) { char magick[MagickPathExtent], filename[MagickPathExtent]; register const Image *p; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); (void) CopyMagickString(magick,image->magick,MagickPathExtent); (void) CopyMagickString(filename,image->filename,MagickPathExtent); for (p=image; p != (Image *) NULL; p=GetNextImageInList(p)) { if (p->taint != MagickFalse) return(MagickTrue); if (LocaleCompare(p->magick,magick) != 0) return(MagickTrue); if (LocaleCompare(p->filename,filename) != 0) return(MagickTrue); } return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o d i f y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ModifyImage() ensures that there is only a single reference to the image % to be modified, updating the provided image pointer to point to a clone of % the original image if necessary. % % The format of the ModifyImage method is: % % MagickBooleanType ModifyImage(Image **image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure.
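%
%  A hedged sketch of the copy-on-write idiom before altering pixels that
%  may be shared with another reference:
%
%      if (ModifyImage(&image,exception) == MagickFalse)
%        return(MagickFalse);
%      ... image is now uniquely referenced and safe to modify ...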
% */ MagickExport MagickBooleanType ModifyImage(Image **image, ExceptionInfo *exception) { Image *clone_image; assert(image != (Image **) NULL); assert(*image != (Image *) NULL); assert((*image)->signature == MagickCoreSignature); if ((*image)->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename); if (GetImageReferenceCount(*image) <= 1) return(MagickTrue); clone_image=CloneImage(*image,0,0,MagickTrue,exception); LockSemaphoreInfo((*image)->semaphore); (*image)->reference_count--; UnlockSemaphoreInfo((*image)->semaphore); *image=clone_image; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e w M a g i c k I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NewMagickImage() creates a blank image canvas of the specified size and % background color. % % The format of the NewMagickImage method is: % % Image *NewMagickImage(const ImageInfo *image_info,const size_t width, % const size_t height,const PixelInfo *background, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o width: the image width. % % o height: the image height. % % o background: the image color. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *NewMagickImage(const ImageInfo *image_info, const size_t width,const size_t height,const PixelInfo *background, ExceptionInfo *exception) { CacheView *image_view; Image *image; MagickBooleanType status; ssize_t y; assert(image_info != (const ImageInfo *) NULL); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image_info->signature == MagickCoreSignature); assert(background != (const PixelInfo *) NULL); image=AcquireImage(image_info,exception); image->columns=width; image->rows=height; image->colorspace=background->colorspace; image->alpha_trait=background->alpha_trait; image->fuzz=background->fuzz; image->depth=background->depth; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelViaPixelInfo(image,background,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (status == MagickFalse) image=DestroyImage(image); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e f e r e n c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReferenceImage() increments the reference count associated with an image, % returning a pointer to the image. % % The format of the ReferenceImage method is: % % Image *ReferenceImage(Image *image) % % A description of each parameter follows: % % o image: the image.
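%
%  Each reference acquired with ReferenceImage() must be released with
%  DestroyImage(), e.g. (illustrative):
%
%      Image *shared = ReferenceImage(image);
%      ...
%      shared=DestroyImage(shared);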
% */ MagickExport Image *ReferenceImage(Image *image) { assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); LockSemaphoreInfo(image->semaphore); image->reference_count++; UnlockSemaphoreInfo(image->semaphore); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s e t I m a g e P a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetImagePage() resets the image page canvas and position. % % The format of the ResetImagePage method is: % % MagickBooleanType ResetImagePage(Image *image,const char *page) % % A description of each parameter follows: % % o image: the image. % % o page: the relative page specification. % */ MagickExport MagickBooleanType ResetImagePage(Image *image,const char *page) { MagickStatusType flags; RectangleInfo geometry; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); flags=ParseAbsoluteGeometry(page,&geometry); if ((flags & WidthValue) != 0) { if ((flags & HeightValue) == 0) geometry.height=geometry.width; image->page.width=geometry.width; image->page.height=geometry.height; } if ((flags & AspectValue) != 0) { if ((flags & XValue) != 0) image->page.x+=geometry.x; if ((flags & YValue) != 0) image->page.y+=geometry.y; } else { if ((flags & XValue) != 0) { image->page.x=geometry.x; if ((image->page.width == 0) && (geometry.x > 0)) image->page.width=image->columns+geometry.x; } if ((flags & YValue) != 0) { image->page.y=geometry.y; if ((image->page.height == 0) && (geometry.y > 0)) image->page.height=image->rows+geometry.y; } } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s e t I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetImagePixels() resets the image pixels, that is, all the pixel components % are zeroed. % % The format of the ResetImagePixels method is: % % MagickBooleanType ResetImagePixels(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ResetImagePixels(Image *image, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; size_t length; ssize_t y; void *pixels; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); pixels=AcquirePixelCachePixels(image,&length,exception); if (pixels != (void *) NULL) { /* Reset in-core image pixels. */ (void) memset(pixels,0,length); return(MagickTrue); } /* Reset image pixels.
*/ status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { (void) memset(q,0,GetPixelChannels(image)*sizeof(Quantum)); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e A l p h a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageAlpha() sets the alpha levels of the image. % % The format of the SetImageAlpha method is: % % MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o alpha: the level of transparency: 0 is fully transparent and QuantumRange % is fully opaque. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); image->alpha_trait=BlendPixelTrait; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelAlpha(image,alpha,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e B a c k g r o u n d C o l o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageBackgroundColor() initializes the image pixels to the image % background color. The background color is defined by the background_color % member of the image structure. % % The format of the SetImageBackgroundColor method is: % % MagickBooleanType SetImageBackgroundColor(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure.
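%
%  For example, a minimal sketch that fills the canvas with red (the color
%  name is illustrative):
%
%      (void) QueryColorCompliance("red",AllCompliance,
%        &image->background_color,exception);
%      (void) SetImageBackgroundColor(image,exception);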
% */ MagickExport MagickBooleanType SetImageBackgroundColor(Image *image, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; PixelInfo background; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if ((image->background_color.alpha != OpaqueAlpha) && (image->alpha_trait == UndefinedPixelTrait)) (void) SetImageAlphaChannel(image,OnAlphaChannel,exception); ConformPixelInfo(image,&image->background_color,&background,exception); /* Set image background color. */ status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelViaPixelInfo(image,&background,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C h a n n e l M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageChannelMask() sets the image channel mask from the specified channel % mask. % % The format of the SetImageChannelMask method is: % % ChannelType SetImageChannelMask(Image *image, % const ChannelType channel_mask) % % A description of each parameter follows: % % o image: the image. % % o channel_mask: the channel mask. % */ MagickExport ChannelType SetImageChannelMask(Image *image, const ChannelType channel_mask) { return(SetPixelChannelMask(image,channel_mask)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C o l o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageColor() sets the entire image canvas to the specified color. % % The format of the SetImageColor method is: % % MagickBooleanType SetImageColor(Image *image,const PixelInfo *color, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o color: the image color. % % o exception: return any errors or warnings in this structure.
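%
%  A hedged sketch; the PixelInfo is typically seeded from the image and a
%  color name (illustrative):
%
%      PixelInfo color;
%
%      GetPixelInfo(image,&color);
%      (void) QueryColorCompliance("none",AllCompliance,&color,exception);
%      (void) SetImageColor(image,&color,exception);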
% */ MagickExport MagickBooleanType SetImageColor(Image *image, const PixelInfo *color,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); assert(color != (const PixelInfo *) NULL); image->colorspace=color->colorspace; image->alpha_trait=color->alpha_trait; image->fuzz=color->fuzz; image->depth=color->depth; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelViaPixelInfo(image,color,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e S t o r a g e C l a s s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageStorageClass() sets the image class: DirectClass for true color % images or PseudoClass for colormapped images. % % The format of the SetImageStorageClass method is: % % MagickBooleanType SetImageStorageClass(Image *image, % const ClassType storage_class,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o storage_class: The image class. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageStorageClass(Image *image, const ClassType storage_class,ExceptionInfo *exception) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image->storage_class=storage_class; return(SyncImagePixelCache(image,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageExtent() sets the image size (i.e. columns & rows). % % The format of the SetImageExtent method is: % % MagickBooleanType SetImageExtent(Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: The image width in pixels. % % o rows: The image height in pixels. % % o exception: return any errors or warnings in this structure. 
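%
%  For example (dimensions are illustrative):
%
%      if (SetImageExtent(image,640,480,exception) == MagickFalse)
%        return(MagickFalse);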
% */ MagickExport MagickBooleanType SetImageExtent(Image *image,const size_t columns, const size_t rows,ExceptionInfo *exception) { if ((columns == 0) || (rows == 0)) ThrowBinaryException(ImageError,"NegativeOrZeroImageSize",image->filename); image->columns=columns; image->rows=rows; if (image->depth == 0) { image->depth=8; (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ImageDepthNotSupported","`%s'",image->filename); } if (image->depth > (8*sizeof(MagickSizeType))) { image->depth=8*sizeof(MagickSizeType); (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ImageDepthNotSupported","`%s'",image->filename); } return(SyncImagePixelCache(image,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S e t I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageInfo() initializes the 'magick' field of the ImageInfo structure. % It is set to a type of image format based on the prefix or suffix of the % filename. For example, 'ps:image' returns PS indicating a PostScript image. % JPEG is returned for this filename: 'image.jpg'. The filename prefix has % precedence over the suffix. Use an optional index enclosed in brackets % after a file name to specify a desired scene of a multi-resolution image % format like Photo CD (e.g. img0001.pcd[4]). A True (non-zero) return value % indicates success. % % The format of the SetImageInfo method is: % % MagickBooleanType SetImageInfo(ImageInfo *image_info, % const unsigned int frames,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o frames: the number of images you intend to write. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info, const unsigned int frames,ExceptionInfo *exception) { char component[MagickPathExtent], magic[MagickPathExtent], #if defined(MAGICKCORE_ZLIB_DELEGATE) || defined(MAGICKCORE_BZLIB_DELEGATE) path[MagickPathExtent], #endif *q; const MagicInfo *magic_info; const MagickInfo *magick_info; ExceptionInfo *sans_exception; Image *image; MagickBooleanType status; register const char *p; ssize_t count; /* Look for 'image.format' in filename. */ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); *component='\0'; GetPathComponent(image_info->filename,SubimagePath,component); if (*component != '\0') { /* Look for scene specification (e.g. img0001.pcd[4]).
*/ if (IsSceneGeometry(component,MagickFalse) == MagickFalse) { if (IsGeometry(component) != MagickFalse) (void) CloneString(&image_info->extract,component); } else { size_t first, last; (void) CloneString(&image_info->scenes,component); image_info->scene=StringToUnsignedLong(image_info->scenes); image_info->number_scenes=image_info->scene; p=image_info->scenes; for (q=(char *) image_info->scenes; *q != '\0'; p++) { while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) p++; first=(size_t) strtol(p,&q,10); last=first; while (isspace((int) ((unsigned char) *q)) != 0) q++; if (*q == '-') last=(size_t) strtol(q+1,&q,10); if (first > last) Swap(first,last); if (first < image_info->scene) image_info->scene=first; if (last > image_info->number_scenes) image_info->number_scenes=last; p=q; } image_info->number_scenes-=image_info->scene-1; } } *component='\0'; if (*image_info->magick == '\0') GetPathComponent(image_info->filename,ExtensionPath,component); #if defined(MAGICKCORE_ZLIB_DELEGATE) if (*component != '\0') if ((LocaleCompare(component,"gz") == 0) || (LocaleCompare(component,"Z") == 0) || (LocaleCompare(component,"svgz") == 0) || (LocaleCompare(component,"wmz") == 0)) { (void) CopyMagickString(path,image_info->filename,MagickPathExtent); path[strlen(path)-strlen(component)-1]='\0'; GetPathComponent(path,ExtensionPath,component); } #endif #if defined(MAGICKCORE_BZLIB_DELEGATE) if (*component != '\0') if (LocaleCompare(component,"bz2") == 0) { (void) CopyMagickString(path,image_info->filename,MagickPathExtent); path[strlen(path)-strlen(component)-1]='\0'; GetPathComponent(path,ExtensionPath,component); } #endif image_info->affirm=MagickFalse; sans_exception=AcquireExceptionInfo(); if ((*component != '\0') && (IsGlob(component) == MagickFalse)) { MagickFormatType format_type; register ssize_t i; static const char *format_type_formats[] = { "AUTOTRACE", "BROWSE", "DCRAW", "EDIT", "LAUNCH", "MPEG:DECODE", "MPEG:ENCODE", "PRINT", "PS:ALPHA", "PS:CMYK", "PS:COLOR", "PS:GRAY", "PS:MONO", "SCAN", "SHOW", "WIN", (char *) NULL }; /* User specified image format. */ (void) CopyMagickString(magic,component,MagickPathExtent); LocaleUpper(magic); /* Look for explicit image formats. */ format_type=UndefinedFormatType; magick_info=GetMagickInfo(magic,sans_exception); if ((magick_info != (const MagickInfo *) NULL) && (magick_info->format_type != UndefinedFormatType)) format_type=magick_info->format_type; i=0; while ((format_type == UndefinedFormatType) && (format_type_formats[i] != (char *) NULL)) { if ((*magic == *format_type_formats[i]) && (LocaleCompare(magic,format_type_formats[i]) == 0)) format_type=ExplicitFormatType; i++; } if (format_type == UndefinedFormatType) (void) CopyMagickString(image_info->magick,magic,MagickPathExtent); else if (format_type == ExplicitFormatType) { image_info->affirm=MagickTrue; (void) CopyMagickString(image_info->magick,magic,MagickPathExtent); } if (LocaleCompare(magic,"RGB") == 0) image_info->affirm=MagickFalse; /* maybe SGI disguised as RGB */ } /* Look for explicit 'format:image' in filename. 
*/ *magic='\0'; GetPathComponent(image_info->filename,MagickPath,magic); if (*magic == '\0') { (void) CopyMagickString(magic,image_info->magick,MagickPathExtent); magick_info=GetMagickInfo(magic,sans_exception); if (frames == 0) GetPathComponent(image_info->filename,CanonicalPath,component); else GetPathComponent(image_info->filename,SubcanonicalPath,component); (void) CopyMagickString(image_info->filename,component,MagickPathExtent); } else { const DelegateInfo *delegate_info; /* User specified image format. */ LocaleUpper(magic); magick_info=GetMagickInfo(magic,sans_exception); delegate_info=GetDelegateInfo(magic,"*",sans_exception); if (delegate_info == (const DelegateInfo *) NULL) delegate_info=GetDelegateInfo("*",magic,sans_exception); if (((magick_info != (const MagickInfo *) NULL) || (delegate_info != (const DelegateInfo *) NULL)) && (IsMagickConflict(magic) == MagickFalse)) { image_info->affirm=MagickTrue; (void) CopyMagickString(image_info->magick,magic,MagickPathExtent); GetPathComponent(image_info->filename,CanonicalPath,component); (void) CopyMagickString(image_info->filename,component, MagickPathExtent); } } sans_exception=DestroyExceptionInfo(sans_exception); if ((magick_info == (const MagickInfo *) NULL) || (GetMagickEndianSupport(magick_info) == MagickFalse)) image_info->endian=UndefinedEndian; if ((image_info->adjoin != MagickFalse) && (frames > 1)) { /* Test for multiple image support (e.g. image%02d.png). */ (void) InterpretImageFilename(image_info,(Image *) NULL, image_info->filename,(int) image_info->scene,component,exception); if ((LocaleCompare(component,image_info->filename) != 0) && (strchr(component,'%') == (char *) NULL)) image_info->adjoin=MagickFalse; } if ((image_info->adjoin != MagickFalse) && (frames > 0)) { /* Some image formats do not support multiple frames per file. */ magick_info=GetMagickInfo(magic,exception); if (magick_info != (const MagickInfo *) NULL) if (GetMagickAdjoin(magick_info) == MagickFalse) image_info->adjoin=MagickFalse; } if (image_info->affirm != MagickFalse) return(MagickTrue); if (frames == 0) { unsigned char *magick; size_t magick_size; /* Determine the image format from the first few bytes of the file. */ magick_size=GetMagicPatternExtent(exception); if (magick_size == 0) return(MagickFalse); image=AcquireImage(image_info,exception); (void) CopyMagickString(image->filename,image_info->filename, MagickPathExtent); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImage(image); return(MagickFalse); } if ((IsBlobSeekable(image) == MagickFalse) || (IsBlobExempt(image) != MagickFalse)) { /* Copy image to seekable temporary file. 
*/ *component='\0'; status=ImageToFile(image,component,exception); (void) CloseBlob(image); if (status == MagickFalse) { image=DestroyImage(image); return(MagickFalse); } SetImageInfoFile(image_info,(FILE *) NULL); (void) CopyMagickString(image->filename,component,MagickPathExtent); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImage(image); return(MagickFalse); } (void) CopyMagickString(image_info->filename,component, MagickPathExtent); image_info->temporary=MagickTrue; } magick=(unsigned char *) AcquireMagickMemory(magick_size); if (magick == (unsigned char *) NULL) { (void) CloseBlob(image); image=DestroyImage(image); return(MagickFalse); } (void) memset(magick,0,magick_size); count=ReadBlob(image,magick_size,magick); (void) SeekBlob(image,-((MagickOffsetType) count),SEEK_CUR); (void) CloseBlob(image); image=DestroyImage(image); /* Check magic cache. */ sans_exception=AcquireExceptionInfo(); magic_info=GetMagicInfo(magick,(size_t) count,sans_exception); magick=(unsigned char *) RelinquishMagickMemory(magick); if ((magic_info != (const MagicInfo *) NULL) && (GetMagicName(magic_info) != (char *) NULL)) { /* Try to use magick_info that was determined earlier by the extension */ if ((magick_info != (const MagickInfo *) NULL) && (GetMagickUseExtension(magick_info) != MagickFalse) && (LocaleCompare(magick_info->magick_module,GetMagicName( magic_info)) == 0)) (void) CopyMagickString(image_info->magick,magick_info->name, MagickPathExtent); else { (void) CopyMagickString(image_info->magick,GetMagicName( magic_info),MagickPathExtent); magick_info=GetMagickInfo(image_info->magick,sans_exception); } if ((magick_info == (const MagickInfo *) NULL) || (GetMagickEndianSupport(magick_info) == MagickFalse)) image_info->endian=UndefinedEndian; sans_exception=DestroyExceptionInfo(sans_exception); return(MagickTrue); } magick_info=GetMagickInfo(image_info->magick,sans_exception); if ((magick_info == (const MagickInfo *) NULL) || (GetMagickEndianSupport(magick_info) == MagickFalse)) image_info->endian=UndefinedEndian; sans_exception=DestroyExceptionInfo(sans_exception); } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e I n f o B l o b % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageInfoBlob() sets the image info blob member. % % The format of the SetImageInfoBlob method is: % % void SetImageInfoBlob(ImageInfo *image_info,const void *blob, % const size_t length) % % A description of each parameter follows: % % o image_info: the image info. % % o blob: the blob. % % o length: the blob length. % */ MagickExport void SetImageInfoBlob(ImageInfo *image_info,const void *blob, const size_t length) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); image_info->blob=(void *) blob; image_info->length=length; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e I n f o C u s t o m S t r e a m % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageInfoCustomStream() sets the image info custom stream handlers. 
% % The format of the SetImageInfoCustomStream method is: % % void SetImageInfoCustomStream(ImageInfo *image_info, % CustomStreamInfo *custom_stream) % % A description of each parameter follows: % % o image_info: the image info. % % o custom_stream: your custom stream methods. % */ MagickExport void SetImageInfoCustomStream(ImageInfo *image_info, CustomStreamInfo *custom_stream) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); image_info->custom_stream=(CustomStreamInfo *) custom_stream; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e I n f o F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageInfoFile() sets the image info file member. % % The format of the SetImageInfoFile method is: % % void SetImageInfoFile(ImageInfo *image_info,FILE *file) % % A description of each parameter follows: % % o image_info: the image info. % % o file: the file. % */ MagickExport void SetImageInfoFile(ImageInfo *image_info,FILE *file) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); image_info->file=file; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageMask() associates a mask with the image. The mask must be the same % dimensions as the image. % % The format of the SetImageMask method is: % % MagickBooleanType SetImageMask(Image *image,const PixelMask type, % const Image *mask,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o type: the mask type, ReadPixelMask or WritePixelMask. % % o mask: the image mask. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageMask(Image *image,const PixelMask type, const Image *mask,ExceptionInfo *exception) { CacheView *mask_view, *image_view; MagickBooleanType status; ssize_t y; /* Set image mask. 
*/ assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (mask == (const Image *) NULL) { switch (type) { case ReadPixelMask: { image->channels=(ChannelType) (image->channels & ~ReadMaskChannel); break; } case WritePixelMask: { image->channels=(ChannelType) (image->channels & ~WriteMaskChannel); break; } default: { image->channels=(ChannelType) (image->channels & ~CompositeMaskChannel); break; } } return(SyncImagePixelCache(image,exception)); } switch (type) { case ReadPixelMask: { image->channels=(ChannelType) (image->channels | ReadMaskChannel); break; } case WritePixelMask: { image->channels=(ChannelType) (image->channels | WriteMaskChannel); break; } default: { image->channels=(ChannelType) (image->channels | CompositeMaskChannel); break; } } if (SyncImagePixelCache(image,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; image->mask_trait=UpdatePixelTrait; mask_view=AcquireVirtualCacheView(mask,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(mask,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(mask_view,0,y,mask->columns,1,exception); q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType intensity; intensity=0.0; if ((x < (ssize_t) mask->columns) && (y < (ssize_t) mask->rows)) intensity=GetPixelIntensity(mask,p); switch (type) { case ReadPixelMask: { SetPixelReadMask(image,ClampToQuantum(intensity),q); break; } case WritePixelMask: { SetPixelWriteMask(image,ClampToQuantum(intensity),q); break; } default: { SetPixelCompositeMask(image,ClampToQuantum(intensity),q); break; } } p+=GetPixelChannels(mask); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image->mask_trait=UndefinedPixelTrait; mask_view=DestroyCacheView(mask_view); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e R e g i o n M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageRegionMask() associates a mask with the image as defined by the % specified region. % % The format of the SetImageRegionMask method is: % % MagickBooleanType SetImageRegionMask(Image *image,const PixelMask type, % const RectangleInfo *region,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o type: the mask type, ReadPixelMask or WritePixelMask. % % o region: the mask region. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageRegionMask(Image *image, const PixelMask type,const RectangleInfo *region,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; ssize_t y; /* Set image mask as defined by the region.
*/ assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (region == (const RectangleInfo *) NULL) { switch (type) { case ReadPixelMask: { image->channels=(ChannelType) (image->channels & ~ReadMaskChannel); break; } case WritePixelMask: { image->channels=(ChannelType) (image->channels & ~WriteMaskChannel); break; } default: { image->channels=(ChannelType) (image->channels & ~CompositeMaskChannel); break; } } return(SyncImagePixelCache(image,exception)); } switch (type) { case ReadPixelMask: { image->channels=(ChannelType) (image->channels | ReadMaskChannel); break; } case WritePixelMask: { image->channels=(ChannelType) (image->channels | WriteMaskChannel); break; } default: { image->channels=(ChannelType) (image->channels | CompositeMaskChannel); break; } } if (SyncImagePixelCache(image,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; image->mask_trait=UpdatePixelTrait; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { Quantum pixel; pixel=QuantumRange; if (((x >= region->x) && (x < (region->x+(ssize_t) region->width))) && ((y >= region->y) && (y < (region->y+(ssize_t) region->height)))) pixel=(Quantum) 0; switch (type) { case ReadPixelMask: { SetPixelReadMask(image,pixel,q); break; } case WritePixelMask: { SetPixelWriteMask(image,pixel,q); break; } default: { SetPixelCompositeMask(image,pixel,q); break; } } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image->mask_trait=UndefinedPixelTrait; image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e V i r t u a l P i x e l M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageVirtualPixelMethod() sets the "virtual pixels" method for the % image and returns the previous setting. A virtual pixel is any pixel access % that is outside the boundaries of the image cache. % % The format of the SetImageVirtualPixelMethod() method is: % % VirtualPixelMethod SetImageVirtualPixelMethod(Image *image, % const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: choose the type of virtual pixel. % % o exception: return any errors or warnings in this structure. 
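%
%  The previous setting is returned so it can be restored once the operation
%  completes, e.g. (illustrative):
%
%      VirtualPixelMethod previous = SetImageVirtualPixelMethod(image,
%        BlackVirtualPixelMethod,exception);
%      ...
%      (void) SetImageVirtualPixelMethod(image,previous,exception);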
%
*/
MagickExport VirtualPixelMethod SetImageVirtualPixelMethod(Image *image,
  const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(SetPixelCacheVirtualMethod(image,virtual_pixel_method,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S m u s h I m a g e s                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SmushImages() takes all images from the current image pointer to the end
%  of the image list and smushes them to each other top-to-bottom if the
%  stack parameter is true, otherwise left-to-right.
%
%  The current gravity setting now affects how the image is justified in the
%  final image.
%
%  The format of the SmushImages method is:
%
%      Image *SmushImages(const Image *images,const MagickBooleanType stack,
%        const ssize_t offset,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o stack: A value other than 0 stacks the images top-to-bottom.
%
%    o offset: minimum distance in pixels between images.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static ssize_t SmushXGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *left_view,
    *right_view;

  const Image
    *left_image,
    *right_image;

  RectangleInfo
    left_geometry,
    right_geometry;

  register const Quantum
    *p;

  register ssize_t
    i,
    y;

  size_t
    gap;

  ssize_t
    x;

  if (images->previous == (Image *) NULL)
    return(0);
  right_image=images;
  SetGeometry(smush_image,&right_geometry);
  GravityAdjustGeometry(right_image->columns,right_image->rows,
    right_image->gravity,&right_geometry);
  left_image=images->previous;
  SetGeometry(smush_image,&left_geometry);
  GravityAdjustGeometry(left_image->columns,left_image->rows,
    left_image->gravity,&left_geometry);
  gap=right_image->columns;
  left_view=AcquireVirtualCacheView(left_image,exception);
  right_view=AcquireVirtualCacheView(right_image,exception);
  for (y=0; y < (ssize_t) smush_image->rows; y++)
  {
    for (x=(ssize_t) left_image->columns-1; x > 0; x--)
    {
      p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(left_image,p) != TransparentAlpha) ||
          ((left_image->columns-x-1) >= gap))
        break;
    }
    i=(ssize_t) left_image->columns-x-1;
    for (x=0; x < (ssize_t) right_image->columns; x++)
    {
      p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1,
        exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(right_image,p) != TransparentAlpha) ||
          ((x+i) >= (ssize_t) gap))
        break;
    }
    if ((x+i) < (ssize_t) gap)
      gap=(size_t) (x+i);
  }
  right_view=DestroyCacheView(right_view);
  left_view=DestroyCacheView(left_view);
  if (y < (ssize_t) smush_image->rows)
    return(offset);
  return((ssize_t) gap-offset);
}

static ssize_t SmushYGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *bottom_view,
    *top_view;

  const Image
    *bottom_image,
    *top_image;

  RectangleInfo
    bottom_geometry,
    top_geometry;

  register const Quantum
    *p;

  register ssize_t
    i,
    x;

  size_t
    gap;

  ssize_t
    y;

  if (images->previous == (Image *) NULL)
    return(0);
  bottom_image=images;
  SetGeometry(smush_image,&bottom_geometry);
  GravityAdjustGeometry(bottom_image->columns,bottom_image->rows,
    bottom_image->gravity,&bottom_geometry);
top_image=images->previous; SetGeometry(smush_image,&top_geometry); GravityAdjustGeometry(top_image->columns,top_image->rows,top_image->gravity, &top_geometry); gap=bottom_image->rows; top_view=AcquireVirtualCacheView(top_image,exception); bottom_view=AcquireVirtualCacheView(bottom_image,exception); for (x=0; x < (ssize_t) smush_image->columns; x++) { for (y=(ssize_t) top_image->rows-1; y > 0; y--) { p=GetCacheViewVirtualPixels(top_view,top_geometry.x+x,y,1,1,exception); if ((p == (const Quantum *) NULL) || (GetPixelAlpha(top_image,p) != TransparentAlpha) || ((top_image->rows-y-1) >= gap)) break; } i=(ssize_t) top_image->rows-y-1; for (y=0; y < (ssize_t) bottom_image->rows; y++) { p=GetCacheViewVirtualPixels(bottom_view,bottom_geometry.x+x,y,1,1, exception); if ((p == (const Quantum *) NULL) || (GetPixelAlpha(bottom_image,p) != TransparentAlpha) || ((y+i) >= (ssize_t) gap)) break; } if ((y+i) < (ssize_t) gap) gap=(size_t) (y+i); } bottom_view=DestroyCacheView(bottom_view); top_view=DestroyCacheView(top_view); if (x < (ssize_t) smush_image->columns) return(offset); return((ssize_t) gap-offset); } MagickExport Image *SmushImages(const Image *images, const MagickBooleanType stack,const ssize_t offset,ExceptionInfo *exception) { #define SmushImageTag "Smush/Image" const Image *image; Image *smush_image; MagickBooleanType proceed, status; MagickOffsetType n; PixelTrait alpha_trait; RectangleInfo geometry; register const Image *next; size_t height, number_images, width; ssize_t x_offset, y_offset; /* Compute maximum area of smushed area. */ assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=images; alpha_trait=image->alpha_trait; number_images=1; width=image->columns; height=image->rows; next=GetNextImageInList(image); for ( ; next != (Image *) NULL; next=GetNextImageInList(next)) { if (next->alpha_trait != UndefinedPixelTrait) alpha_trait=BlendPixelTrait; number_images++; if (stack != MagickFalse) { if (next->columns > width) width=next->columns; height+=next->rows; if (next->previous != (Image *) NULL) height+=offset; continue; } width+=next->columns; if (next->previous != (Image *) NULL) width+=offset; if (next->rows > height) height=next->rows; } /* Smush images. 
*/ smush_image=CloneImage(image,width,height,MagickTrue,exception); if (smush_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(smush_image,DirectClass,exception) == MagickFalse) { smush_image=DestroyImage(smush_image); return((Image *) NULL); } smush_image->alpha_trait=alpha_trait; (void) SetImageBackgroundColor(smush_image,exception); status=MagickTrue; x_offset=0; y_offset=0; for (n=0; n < (MagickOffsetType) number_images; n++) { SetGeometry(smush_image,&geometry); GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry); if (stack != MagickFalse) { x_offset-=geometry.x; y_offset-=SmushYGap(smush_image,image,offset,exception); } else { x_offset-=SmushXGap(smush_image,image,offset,exception); y_offset-=geometry.y; } status=CompositeImage(smush_image,image,OverCompositeOp,MagickTrue,x_offset, y_offset,exception); proceed=SetImageProgress(image,SmushImageTag,n,number_images); if (proceed == MagickFalse) break; if (stack == MagickFalse) { x_offset+=(ssize_t) image->columns; y_offset=0; } else { x_offset=0; y_offset+=(ssize_t) image->rows; } image=GetNextImageInList(image); } if (stack == MagickFalse) smush_image->columns=(size_t) x_offset; else smush_image->rows=(size_t) y_offset; if (status == MagickFalse) smush_image=DestroyImage(smush_image); return(smush_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S t r i p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % StripImage() strips an image of all profiles and comments. % % The format of the StripImage method is: % % MagickBooleanType StripImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType StripImage(Image *image,ExceptionInfo *exception) { MagickBooleanType status; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); (void) exception; DestroyImageProfiles(image); (void) DeleteImageProperty(image,"comment"); (void) DeleteImageProperty(image,"date:create"); (void) DeleteImageProperty(image,"date:modify"); status=SetImageArtifact(image,"png:exclude-chunk", "bKGD,caNv,cHRM,eXIf,gAMA,iCCP,iTXt,pHYs,sRGB,tEXt,zCCP,zTXt,date"); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncImage() initializes the red, green, and blue intensities of each pixel % as defined by the colormap index. % % The format of the SyncImage method is: % % MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ static inline Quantum PushColormapIndex(Image *image,const Quantum index, MagickBooleanType *range_exception) { if ((size_t) index < image->colors) return(index); *range_exception=MagickTrue; return((Quantum) 0); } MagickExport MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType range_exception, status, taint; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (image->ping != MagickFalse) return(MagickTrue); if (image->storage_class != PseudoClass) return(MagickFalse); assert(image->colormap != (PixelInfo *) NULL); range_exception=MagickFalse; status=MagickTrue; taint=image->taint; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(range_exception,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum index; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { index=PushColormapIndex(image,GetPixelIndex(image,q),&range_exception); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); image->taint=taint; if ((image->ping == MagickFalse) && (range_exception != MagickFalse)) (void) ThrowMagickException(exception,GetMagickModule(), CorruptImageWarning,"InvalidColormapIndex","`%s'",image->filename); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S y n c I m a g e S e t t i n g s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncImageSettings() syncs any image_info global options into per-image % attributes. % % Note: in IMv6 free form 'options' were always mapped into 'artifacts', so % that operations and coders can find such settings. In IMv7 if a desired % per-image artifact is not set, then it will directly look for a global % option as a fallback, as such this copy is no longer needed, only the % link set up. % % The format of the SyncImageSettings method is: % % MagickBooleanType SyncImageSettings(const ImageInfo *image_info, % Image *image,ExceptionInfo *exception) % MagickBooleanType SyncImagesSettings(const ImageInfo *image_info, % Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info, Image *images,ExceptionInfo *exception) { Image *image; assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); image=images; for ( ; image != (Image *) NULL; image=GetNextImageInList(image)) (void) SyncImageSettings(image_info,image,exception); (void) DeleteImageOption(image_info,"page"); return(MagickTrue); } MagickExport MagickBooleanType SyncImageSettings(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { const char *option; GeometryInfo geometry_info; MagickStatusType flags; ResolutionType units; /* Sync image options. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); option=GetImageOption(image_info,"background"); if (option != (const char *) NULL) (void) QueryColorCompliance(option,AllCompliance,&image->background_color, exception); option=GetImageOption(image_info,"black-point-compensation"); if (option != (const char *) NULL) image->black_point_compensation=(MagickBooleanType) ParseCommandOption( MagickBooleanOptions,MagickFalse,option); option=GetImageOption(image_info,"blue-primary"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); image->chromaticity.blue_primary.x=geometry_info.rho; image->chromaticity.blue_primary.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.blue_primary.y=image->chromaticity.blue_primary.x; } option=GetImageOption(image_info,"bordercolor"); if (option != (const char *) NULL) (void) QueryColorCompliance(option,AllCompliance,&image->border_color, exception); /* FUTURE: do not sync compose to per-image compose setting here */ option=GetImageOption(image_info,"compose"); if (option != (const char *) NULL) image->compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions, MagickFalse,option); /* -- */ option=GetImageOption(image_info,"compress"); if (option != (const char *) NULL) image->compression=(CompressionType) ParseCommandOption( MagickCompressOptions,MagickFalse,option); option=GetImageOption(image_info,"debug"); if (option != (const char *) NULL) image->debug=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions, MagickFalse,option); option=GetImageOption(image_info,"density"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); image->resolution.x=geometry_info.rho; image->resolution.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->resolution.y=image->resolution.x; } option=GetImageOption(image_info,"depth"); if (option != (const char *) NULL) image->depth=StringToUnsignedLong(option); option=GetImageOption(image_info,"endian"); if (option != (const char *) NULL) image->endian=(EndianType) ParseCommandOption(MagickEndianOptions, MagickFalse,option); option=GetImageOption(image_info,"filter"); if (option != (const char *) NULL) image->filter=(FilterType) ParseCommandOption(MagickFilterOptions, MagickFalse,option); option=GetImageOption(image_info,"fuzz"); if (option != (const char *) NULL) image->fuzz=StringToDoubleInterval(option,(double) QuantumRange+1.0); 
option=GetImageOption(image_info,"gravity"); if (option != (const char *) NULL) image->gravity=(GravityType) ParseCommandOption(MagickGravityOptions, MagickFalse,option); option=GetImageOption(image_info,"green-primary"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); image->chromaticity.green_primary.x=geometry_info.rho; image->chromaticity.green_primary.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.green_primary.y=image->chromaticity.green_primary.x; } option=GetImageOption(image_info,"intent"); if (option != (const char *) NULL) image->rendering_intent=(RenderingIntent) ParseCommandOption( MagickIntentOptions,MagickFalse,option); option=GetImageOption(image_info,"intensity"); if (option != (const char *) NULL) image->intensity=(PixelIntensityMethod) ParseCommandOption( MagickPixelIntensityOptions,MagickFalse,option); option=GetImageOption(image_info,"interlace"); if (option != (const char *) NULL) image->interlace=(InterlaceType) ParseCommandOption(MagickInterlaceOptions, MagickFalse,option); option=GetImageOption(image_info,"interpolate"); if (option != (const char *) NULL) image->interpolate=(PixelInterpolateMethod) ParseCommandOption( MagickInterpolateOptions,MagickFalse,option); option=GetImageOption(image_info,"loop"); if (option != (const char *) NULL) image->iterations=StringToUnsignedLong(option); option=GetImageOption(image_info,"mattecolor"); if (option != (const char *) NULL) (void) QueryColorCompliance(option,AllCompliance,&image->matte_color, exception); option=GetImageOption(image_info,"orient"); if (option != (const char *) NULL) image->orientation=(OrientationType) ParseCommandOption( MagickOrientationOptions,MagickFalse,option); option=GetImageOption(image_info,"page"); if (option != (const char *) NULL) { char *geometry; geometry=GetPageGeometry(option); flags=ParseAbsoluteGeometry(geometry,&image->page); geometry=DestroyString(geometry); } option=GetImageOption(image_info,"quality"); if (option != (const char *) NULL) image->quality=StringToUnsignedLong(option); option=GetImageOption(image_info,"red-primary"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); image->chromaticity.red_primary.x=geometry_info.rho; image->chromaticity.red_primary.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.red_primary.y=image->chromaticity.red_primary.x; } if (image_info->quality != UndefinedCompressionQuality) image->quality=image_info->quality; option=GetImageOption(image_info,"scene"); if (option != (const char *) NULL) image->scene=StringToUnsignedLong(option); option=GetImageOption(image_info,"taint"); if (option != (const char *) NULL) image->taint=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions, MagickFalse,option); option=GetImageOption(image_info,"tile-offset"); if (option != (const char *) NULL) { char *geometry; geometry=GetPageGeometry(option); flags=ParseAbsoluteGeometry(geometry,&image->tile_offset); geometry=DestroyString(geometry); } option=GetImageOption(image_info,"transparent-color"); if (option != (const char *) NULL) (void) QueryColorCompliance(option,AllCompliance,&image->transparent_color, exception); option=GetImageOption(image_info,"type"); if (option != (const char *) NULL) image->type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse, option); option=GetImageOption(image_info,"units"); units=image_info->units; if (option != (const char *) NULL) units=(ResolutionType) ParseCommandOption(MagickResolutionOptions, 
      MagickFalse,option);
  if (units != UndefinedResolution)
    {
      if (image->units != units)
        switch (image->units)
        {
          case PixelsPerInchResolution:
          {
            if (units == PixelsPerCentimeterResolution)
              {
                image->resolution.x/=2.54;
                image->resolution.y/=2.54;
              }
            break;
          }
          case PixelsPerCentimeterResolution:
          {
            if (units == PixelsPerInchResolution)
              {
                image->resolution.x=(double) ((size_t) (100.0*2.54*
                  image->resolution.x+0.5))/100.0;
                image->resolution.y=(double) ((size_t) (100.0*2.54*
                  image->resolution.y+0.5))/100.0;
              }
            break;
          }
          default:
            break;
        }
      image->units=units;
      option=GetImageOption(image_info,"density");
      if (option != (const char *) NULL)
        {
          flags=ParseGeometry(option,&geometry_info);
          image->resolution.x=geometry_info.rho;
          image->resolution.y=geometry_info.sigma;
          if ((flags & SigmaValue) == 0)
            image->resolution.y=image->resolution.x;
        }
    }
  option=GetImageOption(image_info,"virtual-pixel");
  if (option != (const char *) NULL)
    (void) SetImageVirtualPixelMethod(image,(VirtualPixelMethod)
      ParseCommandOption(MagickVirtualPixelOptions,MagickFalse,option),
      exception);
  option=GetImageOption(image_info,"white-point");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.white_point.x=geometry_info.rho;
      image->chromaticity.white_point.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.white_point.y=image->chromaticity.white_point.x;
    }
  /*
    Set up a pointer so that per-image artifact lookups fall back to the
    global option settings/defines.  This avoids duplicating every global
    option as a per-image artifact, while ensuring that only explicitly set
    per-image artifacts are preserved when a parenthesis ends.
  */
  if (image->image_info != (ImageInfo *) NULL)
    image->image_info=DestroyImageInfo(image->image_info);
  image->image_info=CloneImageInfo(image_info);
  return(MagickTrue);
}
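/*
  Illustration only, not part of ImageMagick: a minimal standalone sketch of
  the inch/centimeter resolution conversion performed by SyncImageSettings()
  above.  Converting pixels-per-inch to pixels-per-centimeter divides by 2.54
  (one inch is 2.54 cm); converting back multiplies by 2.54 and rounds to two
  decimal places via the scale-truncate-divide idiom used in the code above.
  Compile it on its own, e.g. cc units.c && ./a.out.
*/
#include <stddef.h>
#include <stdio.h>

int main(void)
{
  double dpi = 300.0;
  /* pixels/inch -> pixels/centimeter */
  double dpcm = dpi/2.54;
  /* pixels/centimeter -> pixels/inch, rounded to two decimals as above:
     scale by 100*2.54, add 0.5, truncate, divide by 100 */
  double round_trip = (double) ((size_t) (100.0*2.54*dpcm+0.5))/100.0;
  (void) printf("%g dpi = %g pixels/cm; round trip = %g dpi\n",
    dpi, dpcm, round_trip);
  return 0;
}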
linearFiltering-laplacian.c
/*************************************************************************
 * linearFiltering-laplacian.c -
 *
 * $Id$
 *
 * Copyright (c) INRIA 2012, all rights reserved
 *
 * AUTHOR:
 * Gregoire Malandain (gregoire.malandain@inria.fr)
 *
 * CREATION DATE:
 * Wed Dec 26 22:19:17 CET 2012
 *
 * ADDITIONS, CHANGES
 *
 */

/* WARNING: this file is not meant to be compiled on its own;
 * it is included from linearFiltering.c
 */

int laplacian2D( void *bufferIn, bufferType typeIn,
                 void *bufferOut, bufferType typeOut,
                 int *bufferDims, int *borderLengths,
                 typeFilteringCoefficients *theFilter )
{
  char *proc = "laplacian2D";
  size_t dimx, dimy, dimz;
  size_t sizeAuxBuf = 0;
  typeFilteringCoefficients filter[3];
  float *auxBuf = NULL;
  float *tmpBuf = NULL;
  float *lapBuf = NULL;
  long int i;

  dimx = bufferDims[0];
  dimy = bufferDims[1];
  dimz = bufferDims[2];

  sizeAuxBuf = dimx*dimy*dimz;
  if ( typeOut != FLOAT || bufferIn == bufferOut )
    sizeAuxBuf *= 2;

  /* allocate the computation buffers */
  auxBuf = (float*)vtmalloc( sizeAuxBuf * sizeof(float), "auxBuf", proc );
  if ( auxBuf == NULL ) {
    if ( _verbose_ > 0 )
      fprintf( stderr, "%s: unable to allocate auxiliary buffer\n", proc );
    return( -1 );
  }
  tmpBuf = auxBuf;
  if ( typeOut != FLOAT || bufferIn == bufferOut ) {
    lapBuf = tmpBuf;
    lapBuf += dimx*dimy*dimz;
  }
  else {
    lapBuf = (float*)bufferOut;
  }

  /* filtering */
  filter[0] = theFilter[0];
  filter[1] = theFilter[1];
  filter[2] = theFilter[2];

  /* derivative along X */
  filter[0].derivative = DERIVATIVE_2;
  filter[1].derivative = DERIVATIVE_0;
  filter[2].derivative = NODERIVATIVE;
  if ( separableLinearFiltering( bufferIn, typeIn, (void*)lapBuf, FLOAT,
                                 bufferDims, borderLengths, filter ) != 1 ) {
    if ( _verbose_ )
      fprintf( stderr, "%s: unable to compute X derivative (2D)\n", proc );
    vtfree( auxBuf );
    return( -1 );
  }

  /* derivative along Y */
  filter[0].derivative = DERIVATIVE_0;
  filter[1].derivative = DERIVATIVE_2;
  filter[2].derivative = NODERIVATIVE;
  if ( separableLinearFiltering( bufferIn, typeIn, (void*)tmpBuf, FLOAT,
                                 bufferDims, borderLengths, filter ) != 1 ) {
    if ( _verbose_ )
      fprintf( stderr, "%s: unable to compute Y derivative (2D)\n", proc );
    vtfree( auxBuf );
    return( -1 );
  }

  sizeAuxBuf = dimx*dimy*dimz;
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for ( i = 0; i < (long int)sizeAuxBuf; i++ )
    lapBuf[i] += tmpBuf[i];

  if ( lapBuf != bufferOut ) {
    if ( ConvertBuffer( lapBuf, FLOAT, bufferOut, typeOut, sizeAuxBuf ) != 1 ) {
      if ( _verbose_ )
        fprintf( stderr, "%s: unable to convert buffer\n", proc );
      vtfree( auxBuf );
      return( -1 );
    }
  }

  vtfree( auxBuf );
  return( 1 );
}

int laplacian3D( void *bufferIn, bufferType typeIn,
                 void *bufferOut, bufferType typeOut,
                 int *bufferDims, int *borderLengths,
                 typeFilteringCoefficients *theFilter )
{
  char *proc = "laplacian3D";
  size_t dimx, dimy, dimz;
  size_t sizeAuxBuf = 0;
  typeFilteringCoefficients filter[3];
  float *auxBuf = NULL;
  float *tmpBuf = NULL;
  float *lapBuf = NULL;
  long int i;

  dimx = bufferDims[0];
  dimy = bufferDims[1];
  dimz = bufferDims[2];

  sizeAuxBuf = dimx*dimy*dimz;
  if ( typeOut != FLOAT || bufferIn == bufferOut )
    sizeAuxBuf *= 2;

  /* allocate the computation buffers */
  auxBuf = (float*)vtmalloc( sizeAuxBuf * sizeof(float), "auxBuf", proc );
  if ( auxBuf == NULL ) {
    if ( _verbose_ > 0 )
      fprintf( stderr, "%s: unable to allocate auxiliary buffer\n", proc );
    return( -1 );
  }
  tmpBuf = auxBuf;
  if ( typeOut != FLOAT || bufferIn == bufferOut ) {
    lapBuf = tmpBuf;
    lapBuf += dimx*dimy*dimz;
  }
  else {
    lapBuf = (float*)bufferOut;
  }

  /* filtering */
  filter[0] = theFilter[0];
  filter[1] = theFilter[1];
  filter[2] = theFilter[2];
/* smoothing along Z */ filter[0].derivative = NODERIVATIVE; filter[1].derivative = NODERIVATIVE; filter[2].derivative = DERIVATIVE_0; if ( separableLinearFiltering( bufferIn, typeIn, (void*)tmpBuf, FLOAT, bufferDims, borderLengths, filter ) != 1 ) { if ( _verbose_ ) fprintf( stderr, "%s: unable to compute Z smoothing (3D)\n", proc ); vtfree( auxBuf ); return( -1 ); } /* derivative along X */ filter[0].derivative = DERIVATIVE_2; filter[1].derivative = DERIVATIVE_0; filter[2].derivative = NODERIVATIVE; if ( separableLinearFiltering( (void*)tmpBuf, FLOAT, (void*)lapBuf, FLOAT, bufferDims, borderLengths, filter ) != 1 ) { if ( _verbose_ ) fprintf( stderr, "%s: unable to compute X derivative (3D)\n", proc ); vtfree( auxBuf ); return( -1 ); } /* derivative along Y */ filter[0].derivative = DERIVATIVE_0; filter[1].derivative = DERIVATIVE_2; filter[2].derivative = NODERIVATIVE; if ( separableLinearFiltering( (void*)tmpBuf, FLOAT, (void*)tmpBuf, FLOAT, bufferDims, borderLengths, filter ) != 1 ) { if ( _verbose_ ) fprintf( stderr, "%s: unable to compute Y derivative (3D)\n", proc ); vtfree( auxBuf ); return( -1 ); } sizeAuxBuf = dimx*dimy*dimz; #ifdef _OPENMP #pragma omp parallel for #endif for ( i = 0; i < (long int)sizeAuxBuf; i++ ) lapBuf[i] += tmpBuf[i]; /* derivative along Z */ filter[0].derivative = DERIVATIVE_0; filter[1].derivative = DERIVATIVE_0; filter[2].derivative = DERIVATIVE_2; if ( separableLinearFiltering( bufferIn, typeIn, (void*)tmpBuf, FLOAT, bufferDims, borderLengths, filter ) != 1 ) { if ( _verbose_ ) fprintf( stderr, "%s: unable to compute Z derivative (3D)\n", proc ); vtfree( auxBuf ); return( -1 ); } #ifdef _OPENMP #pragma omp parallel for #endif for ( i = 0; i < (long int)sizeAuxBuf; i++ ) lapBuf[i] += tmpBuf[i]; if ( lapBuf != bufferOut ) { if ( ConvertBuffer( lapBuf, FLOAT, bufferOut, typeOut, sizeAuxBuf ) != 1 ) { if ( _verbose_ ) fprintf( stderr, "%s: unable to convert buffer\n", proc ); vtfree( auxBuf ); return( -1 ); } } vtfree( auxBuf ); return( 1 ); } int laplacian( void *bufferIn, bufferType typeIn, void *bufferOut, bufferType typeOut, int *bufferDims, int *borderLengths, typeFilteringCoefficients *theFilter ) { if ( bufferDims[2] == 1 ) return( laplacian2D( bufferIn, typeIn, bufferOut, typeOut, bufferDims, borderLengths, theFilter ) ); else return( laplacian3D( bufferIn, typeIn, bufferOut, typeOut, bufferDims, borderLengths, theFilter ) ); }
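/*
  Illustration only, not part of this library: the functions above compute
  the Laplacian as the sum of separable second derivatives (recursive
  Gaussian derivative filters via separableLinearFiltering()).  The minimal
  finite-difference analogue of the same operator, L = d2/dx2 + d2/dy2, is
  the classic 5-point stencil sketched below for a single interior pixel.
*/
#include <stddef.h>
#include <stdio.h>

/* 5-point Laplacian at interior pixel (x,y) with unit spacing:
   f(x-1,y) + f(x+1,y) + f(x,y-1) + f(x,y+1) - 4*f(x,y) */
static float laplacian5( const float *img, int dimx, int x, int y )
{
  const float *row = img + (size_t)y*dimx + x;
  return row[-1] + row[1] + row[-dimx] + row[dimx] - 4.0f*row[0];
}

int main(void)
{
  /* tiny 3x3 test image with a single bright center pixel */
  float img[9] = { 0,0,0, 0,1,0, 0,0,0 };
  printf( "laplacian at center = %g\n", laplacian5( img, 3, 1, 1 ) ); /* -4 */
  return 0;
}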
SoftMax.c
#ifndef TH_GENERIC_FILE #define TH_GENERIC_FILE "generic/SoftMax.c" #else #ifdef _MSC_VER #define SOFTMAX_SIZE_TYPE int64_t #define SOFTMAX_CAST_TYPE (int64_t) #else #define SOFTMAX_SIZE_TYPE uint64_t #define SOFTMAX_CAST_TYPE #endif void THNN_(SoftMax_updateOutput)( THNNState *state, THTensor *input, THTensor *output, int dim) { THArgCheck(dim >= 0 && dim < input->nDimension, 4, "dim out of range (got %d, but input has %d dims)", dim, input->nDimension); uint64_t outer_size = 1; uint64_t dim_size = input->size[dim]; uint64_t inner_size = 1; for (uint64_t i = 0; i < dim; ++i) outer_size *= input->size[i]; for (uint64_t i = dim + 1; i < input->nDimension; ++i) inner_size *= input->size[i]; input = THTensor_(newContiguous)(input); THTensor_(resizeAs)(output, input); real *input_data_base = THTensor_(data)(input); real *output_data_base = THTensor_(data)(output); uint64_t dim_stride = inner_size; uint64_t outer_stride = dim_size * dim_stride; SOFTMAX_SIZE_TYPE i, d; #pragma omp parallel for private(i, d) for (i = 0; i < SOFTMAX_CAST_TYPE (outer_size * inner_size); i++) { uint64_t outer_idx = i / inner_size; uint64_t inner_idx = i % inner_size; real *input_data = input_data_base + outer_idx * outer_stride + inner_idx; real *output_data = output_data_base + outer_idx * outer_stride + inner_idx; real input_max = -THInf; for (d = 0; d < SOFTMAX_CAST_TYPE dim_size; d++) { if (input_data[d * dim_stride] >= input_max) input_max = input_data[d * dim_stride]; } accreal sum = 0; for (d = 0; d < SOFTMAX_CAST_TYPE dim_size; d++) { real z = exp(input_data[d * dim_stride] - input_max); output_data[d * dim_stride] = z; sum += z; } real invsum = 1 / sum; // NOTE: truncate sum to real once for (d = 0; d < SOFTMAX_CAST_TYPE dim_size; d++) { output_data[d * dim_stride] *= invsum; } } THTensor_(free)(input); } void THNN_(SoftMax_updateGradInput)( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, THTensor *output, int dim) { THNN_CHECK_SHAPE(output, gradOutput); THArgCheck(dim >= 0 && dim < output->nDimension, 6, "dim out of range (got %d, but input has %d dims)", dim, output->nDimension); uint64_t outer_size = 1; uint64_t dim_size = output->size[dim]; uint64_t inner_size = 1; for (uint64_t i = 0; i < dim; ++i) outer_size *= output->size[i]; for (uint64_t i = dim + 1; i < output->nDimension; ++i) inner_size *= output->size[i]; gradOutput = THTensor_(newContiguous)(gradOutput); output = THTensor_(newContiguous)(output); THTensor_(resizeAs)(gradInput, output); real *gradInput_data_base = THTensor_(data)(gradInput); real *output_data_base = THTensor_(data)(output); real *gradOutput_data_base = THTensor_(data)(gradOutput); uint64_t dim_stride = inner_size; uint64_t outer_stride = dim_size * dim_stride; SOFTMAX_SIZE_TYPE i, d; #pragma omp parallel for private(i, d) for (i = 0; i < SOFTMAX_CAST_TYPE (outer_size * inner_size); i++) { uint64_t outer_idx = i / inner_size; uint64_t inner_idx = i % inner_size; real *gradInput_data = gradInput_data_base + outer_idx * outer_stride + inner_idx; real *output_data = output_data_base + outer_idx * outer_stride + inner_idx; real *gradOutput_data = gradOutput_data_base + outer_idx * outer_stride + inner_idx; accreal sum = 0; for (d = 0; d < SOFTMAX_CAST_TYPE dim_size; d++) sum += ((accreal)gradOutput_data[d * dim_stride]) * ((accreal)output_data[d * dim_stride]); for (d = 0; d < SOFTMAX_CAST_TYPE dim_size; d++) gradInput_data[d * dim_stride] = output_data[d * dim_stride] * (gradOutput_data[d * dim_stride] - sum); } THTensor_(free)(gradOutput); 
THTensor_(free)(output); } #endif
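/*
  Illustration only, not part of THNN: a standalone, numerically stable
  softmax over a plain float array, mirroring the max-subtraction trick used
  by SoftMax_updateOutput() above to keep exp() from overflowing, and the
  single-division normalization (invsum) it applies at the end.
  Build with the math library, e.g. cc softmax_demo.c -lm.
*/
#include <math.h>
#include <stdio.h>

static void softmax(const float *in, float *out, int n)
{
  float max = in[0];
  for (int d = 1; d < n; d++)
    if (in[d] > max)
      max = in[d];               /* subtracting the max bounds exp() inputs */
  float sum = 0.0f;
  for (int d = 0; d < n; d++) {
    out[d] = expf(in[d] - max);  /* exp(x - max) is in (0, 1] */
    sum += out[d];
  }
  float invsum = 1.0f/sum;       /* one division, as in the code above */
  for (int d = 0; d < n; d++)
    out[d] *= invsum;
}

int main(void)
{
  float x[3] = { 1.0f, 2.0f, 3.0f }, y[3];
  softmax(x, y, 3);
  printf("%f %f %f\n", y[0], y[1], y[2]);  /* ~0.090 0.245 0.665 */
  return 0;
}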
quantize.c
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%           QQQ   U   U   AAA   N   N  TTTTT  IIIII   ZZZZZ  EEEEE            %
%          Q   Q  U   U  A   A  NN  N    T      I        ZZ  E                %
%          Q   Q  U   U  AAAAA  N N N    T      I      ZZZ   EEEEE            %
%           Q QQ  U   U  A   A  N  NN    T      I     ZZ     E                %
%            QQQQ  UUU   A   A  N   N    T    IIIII   ZZZZZ  EEEEE            %
%                                                                             %
%                                                                             %
%    MagickCore Methods to Reduce the Number of Unique Colors in an Image     %
%                                                                             %
%                           Software Design                                   %
%                                Cristy                                       %
%                              July 1992                                      %
%                                                                             %
%                                                                             %
%  Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization      %
%  dedicated to making software imaging solutions freely available.           %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may  %
%  obtain a copy of the License at                                            %
%                                                                             %
%    https://imagemagick.org/script/license.php                               %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software        %
%  distributed under the License is distributed on an "AS IS" BASIS,          %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   %
%  See the License for the specific language governing permissions and        %
%  limitations under the License.                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Realism in computer graphics typically requires using 24 bits/pixel to
%  generate an image.  Yet many graphic display devices do not contain the
%  amount of memory necessary to match the spatial and color resolution of
%  the human eye.  The Quantize methods take a 24-bit image and reduce
%  the number of colors so it can be displayed on a raster device with
%  fewer bits per pixel.  In most instances, the quantized image closely
%  resembles the original reference image.
%
%  A reduction of colors in an image is also desirable for image
%  transmission and real-time animation.
%
%  QuantizeImage() takes standard RGB or monochrome images and quantizes
%  them down to some fixed number of colors.
%
%  For purposes of color allocation, an image is a set of n pixels, where
%  each pixel is a point in RGB space.  RGB space is a 3-dimensional
%  vector space, and each pixel, Pi, is defined by an ordered triple of
%  red, green, and blue coordinates, (Ri, Gi, Bi).
%
%  Each primary color component (red, green, or blue) represents an
%  intensity which varies linearly from 0 to a maximum value, Cmax, which
%  corresponds to full saturation of that color.  Color allocation is
%  defined over a domain consisting of the cube in RGB space with opposite
%  vertices at (0,0,0) and (Cmax, Cmax, Cmax).  QUANTIZE requires Cmax =
%  255.
%
%  The algorithm maps this domain onto a tree in which each node
%  represents a cube within that domain.  In the following discussion
%  these cubes are defined by the coordinates of two opposite vertices (the
%  vertex nearest the origin in RGB space and the vertex farthest from the
%  origin).
%
%  The tree's root node represents the entire domain, (0,0,0) through
%  (Cmax,Cmax,Cmax).  Each lower level in the tree is generated by
%  subdividing one node's cube into eight smaller cubes of equal size.
%  This corresponds to bisecting the parent cube with planes passing
%  through the midpoints of each edge.
%
%  The basic algorithm operates in three phases: Classification,
%  Reduction, and Assignment.  Classification builds a color description
%  tree for the image.  Reduction collapses the tree until the number it
%  represents is, at most, the number of colors desired in the output
%  image.  Assignment defines the output image's color map and sets each
%  pixel's color by reclassification in the reduced tree.
%  Our goal is to minimize the numerical discrepancies between the original
%  colors and quantized colors (quantization error).
%
%  Classification begins by initializing a color description tree of
%  sufficient depth to represent each possible input color in a leaf.
%  However, it is impractical to generate a fully-formed color description
%  tree in the classification phase for realistic values of Cmax.  If
%  color components in the input image are quantized to k-bit precision,
%  so that Cmax = 2^k - 1, the tree would need k levels below the root node
%  to allow representing each possible input color in a leaf.  This becomes
%  prohibitive because the tree's total number of nodes is 1 +
%  sum(i=1, k, 8^i).
%
%  A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
%  Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
%  Initializes data structures for nodes only as they are needed;  (2)
%  Chooses a maximum depth for the tree as a function of the desired
%  number of colors in the output image (currently log2(colormap size)).
%
%  For each pixel in the input image, classification scans downward from
%  the root of the color description tree.  At each level of the tree it
%  identifies the single node which represents a cube in RGB space
%  containing the pixel's color.  It updates the following data for each
%  such node:
%
%    n1: Number of pixels whose color is contained in the RGB cube which
%    this node represents;
%
%    n2: Number of pixels whose color is not represented in a node at
%    lower depth in the tree;  initially,  n2 = 0 for all nodes except
%    leaves of the tree.
%
%    Sr, Sg, Sb: Sums of the red, green, and blue component values for all
%    pixels not classified at a lower depth.  The combination of these sums
%    and n2 will ultimately characterize the mean color of a set of pixels
%    represented by this node.
%
%    E: the distance squared in RGB space between each pixel contained
%    within a node and the node's center.  This represents the
%    quantization error for a node.
%
%  Reduction repeatedly prunes the tree until the number of nodes with n2
%  > 0 is less than or equal to the maximum number of colors allowed in
%  the output image.  On any given iteration over the tree, it selects
%  those nodes whose E count is minimal for pruning and merges their color
%  statistics upward.  It uses a pruning threshold, Ep, to govern node
%  selection as follows:
%
%    Ep = 0
%    while number of nodes with (n2 > 0) > required maximum number of colors
%      prune all nodes such that E <= Ep
%      Set Ep to minimum E in remaining nodes
%
%  This has the effect of minimizing any quantization error when merging
%  two nodes together.
%
%  When a node to be pruned has offspring, the pruning procedure invokes
%  itself recursively in order to prune the tree from the leaves upward.
%  n2, Sr, Sg, and Sb in a node being pruned are always added to the
%  corresponding data in that node's parent.  This retains the pruned
%  node's color characteristics for later averaging.
%
%  For each node, n2 pixels exist for which that node represents the
%  smallest volume in RGB space containing those pixels' colors.  When n2
%  > 0 the node will uniquely define a color in the output image.  At the
%  beginning of reduction, n2 = 0 for all nodes except the leaves of
%  the tree, which represent colors present in the input image.
%
%  The other pixel count, n1, indicates the total number of colors within
%  the cubic volume which the node represents.
%  This includes n1 - n2 pixels whose colors should be defined by nodes at
%  a lower level in the tree.
%
%  Assignment generates the output image from the pruned tree.  The output
%  image consists of two parts: (1) A color map, which is an array of
%  color descriptions (RGB triples) for each color present in the output
%  image; (2) A pixel array, which represents each pixel as an index
%  into the color map array.
%
%  First, the assignment phase makes one pass over the pruned color
%  description tree to establish the image's color map.  For each node
%  with n2 > 0, it divides Sr, Sg, and Sb by n2.  This produces the mean
%  color of all pixels that classify no lower than this node.  Each of
%  these colors becomes an entry in the color map.
%
%  Finally, the assignment phase reclassifies each pixel in the pruned
%  tree to identify the deepest node containing the pixel's color.  The
%  pixel's value in the pixel array becomes the index of this node's mean
%  color in the color map.
%
%  This method is based on a similar algorithm written by Paul Raveling.
%
*/

/*
  Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/compare.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"

/*
  Define declarations.
*/
#if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE)
#define CacheShift  2
#else
#define CacheShift  3
#endif
#define ErrorQueueLength  16
#define ErrorRelativeWeight  PerceptibleReciprocal(16)
#define MaxNodes  266817
#define MaxTreeDepth  8
#define NodesInAList  1920

/*
  Typedef declarations.
*/
typedef struct _DoublePixelPacket
{
  double
    red,
    green,
    blue,
    alpha;
} DoublePixelPacket;

typedef struct _NodeInfo
{
  struct _NodeInfo
    *parent,
    *child[16];

  MagickSizeType
    number_unique;

  DoublePixelPacket
    total_color;

  double
    quantize_error;

  size_t
    color_number,
    id,
    level;
} NodeInfo;

typedef struct _Nodes
{
  NodeInfo
    *nodes;

  struct _Nodes
    *next;
} Nodes;

typedef struct _CubeInfo
{
  NodeInfo
    *root;

  size_t
    colors,
    maximum_colors;

  ssize_t
    transparent_index;

  MagickSizeType
    transparent_pixels;

  DoublePixelPacket
    target;

  double
    distance,
    pruning_threshold,
    next_threshold;

  size_t
    nodes,
    free_nodes,
    color_number;

  NodeInfo
    *next_node;

  Nodes
    *node_queue;

  MemoryInfo
    *memory_info;

  ssize_t
    *cache;

  DoublePixelPacket
    error[ErrorQueueLength];

  double
    diffusion,
    weights[ErrorQueueLength];

  QuantizeInfo
    *quantize_info;

  MagickBooleanType
    associate_alpha;

  ssize_t
    x,
    y;

  size_t
    depth;

  MagickOffsetType
    offset;

  MagickSizeType
    span;
} CubeInfo;

/*
  Method prototypes.
*/ static CubeInfo *GetCubeInfo(const QuantizeInfo *,const size_t,const size_t); static NodeInfo *GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *); static MagickBooleanType AssignImageColors(Image *,CubeInfo *,ExceptionInfo *), ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *), DitherImage(Image *,CubeInfo *,ExceptionInfo *), SetGrayscaleImage(Image *,ExceptionInfo *), SetImageColormap(Image *,CubeInfo *,ExceptionInfo *); static void ClosestColor(const Image *,CubeInfo *,const NodeInfo *), DefineImageColormap(Image *,CubeInfo *,NodeInfo *), DestroyCubeInfo(CubeInfo *), PruneLevel(CubeInfo *,const NodeInfo *), PruneToCubeDepth(CubeInfo *,const NodeInfo *), ReduceImageColors(const Image *,CubeInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e Q u a n t i z e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireQuantizeInfo() allocates the QuantizeInfo structure. % % The format of the AcquireQuantizeInfo method is: % % QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. % */ MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info) { QuantizeInfo *quantize_info; quantize_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*quantize_info)); GetQuantizeInfo(quantize_info); if (image_info != (ImageInfo *) NULL) { const char *option; quantize_info->dither_method=image_info->dither == MagickFalse ? NoDitherMethod : RiemersmaDitherMethod; option=GetImageOption(image_info,"dither"); if (option != (const char *) NULL) quantize_info->dither_method=(DitherMethod) ParseCommandOption( MagickDitherOptions,MagickFalse,option); quantize_info->measure_error=image_info->verbose; } return(quantize_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A s s i g n I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AssignImageColors() generates the output image from the pruned tree. The % output image consists of two parts: (1) A color map, which is an array % of color descriptions (RGB triples) for each color present in the % output image; (2) A pixel array, which represents each pixel as an % index into the color map array. % % First, the assignment phase makes one pass over the pruned color % description tree to establish the image's color map. For each node % with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean % color of all pixels that classify no lower than this node. Each of % these colors becomes an entry in the color map. % % Finally, the assignment phase reclassifies each pixel in the pruned % tree to identify the deepest node containing the pixel's color. The % pixel's value in the pixel array becomes the index of this node's mean % color in the color map. % % The format of the AssignImageColors() method is: % % MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. 
% */ static inline void AssociateAlphaPixel(const Image *image, const CubeInfo *cube_info,const Quantum *pixel,DoublePixelPacket *alpha_pixel) { double alpha; if ((cube_info->associate_alpha == MagickFalse) || (GetPixelAlpha(image,pixel) == OpaqueAlpha)) { alpha_pixel->red=(double) GetPixelRed(image,pixel); alpha_pixel->green=(double) GetPixelGreen(image,pixel); alpha_pixel->blue=(double) GetPixelBlue(image,pixel); alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel); return; } alpha=(double) (QuantumScale*GetPixelAlpha(image,pixel)); alpha_pixel->red=alpha*GetPixelRed(image,pixel); alpha_pixel->green=alpha*GetPixelGreen(image,pixel); alpha_pixel->blue=alpha*GetPixelBlue(image,pixel); alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel); } static inline void AssociateAlphaPixelInfo(const CubeInfo *cube_info, const PixelInfo *pixel,DoublePixelPacket *alpha_pixel) { double alpha; if ((cube_info->associate_alpha == MagickFalse) || (pixel->alpha == OpaqueAlpha)) { alpha_pixel->red=(double) pixel->red; alpha_pixel->green=(double) pixel->green; alpha_pixel->blue=(double) pixel->blue; alpha_pixel->alpha=(double) pixel->alpha; return; } alpha=(double) (QuantumScale*pixel->alpha); alpha_pixel->red=alpha*pixel->red; alpha_pixel->green=alpha*pixel->green; alpha_pixel->blue=alpha*pixel->blue; alpha_pixel->alpha=(double) pixel->alpha; } static inline size_t ColorToNodeId(const CubeInfo *cube_info, const DoublePixelPacket *pixel,size_t index) { size_t id; id=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->red)) >> index) & 0x01) | ((ScaleQuantumToChar(ClampPixel(pixel->green)) >> index) & 0x01) << 1 | ((ScaleQuantumToChar(ClampPixel(pixel->blue)) >> index) & 0x01) << 2); if (cube_info->associate_alpha != MagickFalse) id|=((ScaleQuantumToChar(ClampPixel(pixel->alpha)) >> index) & 0x1) << 3; return(id); } static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info, ExceptionInfo *exception) { #define AssignImageTag "Assign/Image" ColorspaceType colorspace; ssize_t y; /* Allocate image colormap. */ colorspace=image->colorspace; if (cube_info->quantize_info->colorspace != UndefinedColorspace) (void) TransformImageColorspace(image,cube_info->quantize_info->colorspace, exception); cube_info->transparent_pixels=0; cube_info->transparent_index=(-1); if (SetImageColormap(image,cube_info,exception) == MagickFalse) return(MagickFalse); /* Create a reduced color image. */ if (cube_info->quantize_info->dither_method != NoDitherMethod) (void) DitherImage(image,cube_info,exception); else { CacheView *image_view; MagickBooleanType status; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { CubeInfo cube; Quantum *magick_restrict q; ssize_t count, x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } cube=(*cube_info); for (x=0; x < (ssize_t) image->columns; x+=count) { DoublePixelPacket pixel; const NodeInfo *node_info; ssize_t i; size_t id, index; /* Identify the deepest node containing the pixel's color. 
          */
          for (count=1; (x+count) < (ssize_t) image->columns; count++)
          {
            PixelInfo
              packet;

            GetPixelInfoPixel(image,q+count*GetPixelChannels(image),&packet);
            if (IsPixelEquivalent(image,q,&packet) == MagickFalse)
              break;
          }
          AssociateAlphaPixel(image,&cube,q,&pixel);
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
            1.0);
          ClosestColor(image,&cube,node_info->parent);
          index=cube.color_number;
          for (i=0; i < (ssize_t) count; i++)
          {
            if (image->storage_class == PseudoClass)
              SetPixelIndex(image,(Quantum) index,q);
            if (cube.quantize_info->measure_error == MagickFalse)
              {
                SetPixelRed(image,ClampToQuantum(
                  image->colormap[index].red),q);
                SetPixelGreen(image,ClampToQuantum(
                  image->colormap[index].green),q);
                SetPixelBlue(image,ClampToQuantum(
                  image->colormap[index].blue),q);
                if (cube.associate_alpha != MagickFalse)
                  SetPixelAlpha(image,ClampToQuantum(
                    image->colormap[index].alpha),q);
              }
            q+=GetPixelChannels(image);
          }
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
    }
  if (cube_info->quantize_info->measure_error != MagickFalse)
    (void) GetImageQuantizeError(image,exception);
  if ((cube_info->quantize_info->number_colors == 2) &&
      (IsGrayColorspace(cube_info->quantize_info->colorspace)))
    {
      double
        intensity;

      /*
        Monochrome image.
      */
      intensity=GetPixelInfoLuma(image->colormap+0) < QuantumRange/2.0 ? 0.0 :
        QuantumRange;
      if (image->colors > 1)
        {
          intensity=0.0;
          if (GetPixelInfoLuma(image->colormap+0) >
              GetPixelInfoLuma(image->colormap+1))
            intensity=(double) QuantumRange;
        }
      image->colormap[0].red=intensity;
      image->colormap[0].green=intensity;
      image->colormap[0].blue=intensity;
      if (image->colors > 1)
        {
          image->colormap[1].red=(double) QuantumRange-intensity;
          image->colormap[1].green=(double) QuantumRange-intensity;
          image->colormap[1].blue=(double) QuantumRange-intensity;
        }
    }
  (void) SyncImage(image,exception);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (IssRGBCompatibleColorspace(colorspace) == MagickFalse))
    (void) TransformImageColorspace(image,colorspace,exception);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C l a s s i f y I m a g e C o l o r s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClassifyImageColors() begins by initializing a color description tree
%  of sufficient depth to represent each possible input color in a leaf.
%  However, it is impractical to generate a fully-formed color
%  description tree in the classification phase for realistic values of
%  Cmax.  If color components in the input image are quantized to k-bit
%  precision, so that Cmax = 2^k - 1, the tree would need k levels below
%  the root node to allow representing each possible input color in a leaf.
%  This becomes prohibitive because the tree's total number of nodes is
%  1 + sum(i=1, k, 8^i).
%
%  A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
%  Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
%  Initializes data structures for nodes only as they are needed;  (2)
%  Chooses a maximum depth for the tree as a function of the desired
%  number of colors in the output image (currently log2(colormap size)).
%
%  For each pixel in the input image, classification scans downward from
%  the root of the color description tree.  At each level of the tree it
%  identifies the single node which represents a cube in RGB space
%  containing the pixel's color.  It updates the following data for each
%  such node:
%
%    n1 : Number of pixels whose color is contained in the RGB cube
%    which this node represents;
%
%    n2 : Number of pixels whose color is not represented in a node at
%    lower depth in the tree;  initially,  n2 = 0 for all nodes except
%    leaves of the tree.
%
%    Sr, Sg, Sb : Sums of the red, green, and blue component values for
%    all pixels not classified at a lower depth.  The combination of
%    these sums and n2 will ultimately characterize the mean color of a
%    set of pixels represented by this node.
%
%    E: the distance squared in RGB space between each pixel contained
%    within a node and the node's center.  This represents the quantization
%    error for a node.
%
%  The format of the ClassifyImageColors() method is:
%
%      MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
%        const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o image: the image.
%
*/

static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
  MagickBooleanType
    associate_alpha;

  associate_alpha=image->alpha_trait != UndefinedPixelTrait ? MagickTrue :
    MagickFalse;
  if ((cube_info->quantize_info->number_colors == 2) &&
      ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
       (cube_info->quantize_info->colorspace == GRAYColorspace)))
    associate_alpha=MagickFalse;
  cube_info->associate_alpha=associate_alpha;
}

static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
  const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag  "Classify/Image"

  CacheView
    *image_view;

  double
    bisect;

  DoublePixelPacket
    error,
    mid,
    midpoint,
    pixel;

  MagickBooleanType
    proceed;

  NodeInfo
    *node_info;

  size_t
    count,
    id,
    index,
    level;

  ssize_t
    y;

  /*
    Classify the first cube_info->maximum_colors colors to a tree depth of 8.
  */
  SetAssociatedAlpha(image,cube_info);
  if (cube_info->quantize_info->colorspace != image->colorspace)
    {
      if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
          (cube_info->quantize_info->colorspace != CMYKColorspace))
        (void) TransformImageColorspace((Image *) image,
          cube_info->quantize_info->colorspace,exception);
      else
        if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
          (void) TransformImageColorspace((Image *) image,sRGBColorspace,
            exception);
    }
  midpoint.red=(double) QuantumRange/2.0;
  midpoint.green=(double) QuantumRange/2.0;
  midpoint.blue=(double) QuantumRange/2.0;
  midpoint.alpha=(double) QuantumRange/2.0;
  error.alpha=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
*/ for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++) { PixelInfo packet; GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet); if (IsPixelEquivalent(image,p,&packet) == MagickFalse) break; } AssociateAlphaPixel(image,cube_info,p,&pixel); index=MaxTreeDepth-1; bisect=((double) QuantumRange+1.0)/2.0; mid=midpoint; node_info=cube_info->root; for (level=1; level <= MaxTreeDepth; level++) { double distance; bisect*=0.5; id=ColorToNodeId(cube_info,&pixel,index); mid.red+=(id & 1) != 0 ? bisect : -bisect; mid.green+=(id & 2) != 0 ? bisect : -bisect; mid.blue+=(id & 4) != 0 ? bisect : -bisect; mid.alpha+=(id & 8) != 0 ? bisect : -bisect; if (node_info->child[id] == (NodeInfo *) NULL) { /* Set colors of new node to contain pixel. */ node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info); if (node_info->child[id] == (NodeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); continue; } if (level == MaxTreeDepth) cube_info->colors++; } /* Approximate the quantization error represented by this node. */ node_info=node_info->child[id]; error.red=QuantumScale*(pixel.red-mid.red); error.green=QuantumScale*(pixel.green-mid.green); error.blue=QuantumScale*(pixel.blue-mid.blue); if (cube_info->associate_alpha != MagickFalse) error.alpha=QuantumScale*(pixel.alpha-mid.alpha); distance=(double) (error.red*error.red+error.green*error.green+ error.blue*error.blue+error.alpha*error.alpha); if (IsNaN(distance) != 0) distance=0.0; node_info->quantize_error+=count*sqrt(distance); cube_info->root->quantize_error+=node_info->quantize_error; index--; } /* Sum RGB for this leaf for later derivation of the mean cube color. */ node_info->number_unique+=count; node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red); node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green); node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue); if (cube_info->associate_alpha != MagickFalse) node_info->total_color.alpha+=count*QuantumScale* ClampPixel(pixel.alpha); else node_info->total_color.alpha+=count*QuantumScale* ClampPixel((MagickRealType) OpaqueAlpha); p+=count*GetPixelChannels(image); } if (cube_info->colors > cube_info->maximum_colors) { PruneToCubeDepth(cube_info,cube_info->root); break; } proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) break; } for (y++; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; if (cube_info->nodes > MaxNodes) { /* Prune one level if the color tree is too large. */ PruneLevel(cube_info,cube_info->root); cube_info->depth--; } for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count) { /* Start at the root and descend the color cube tree. */ for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++) { PixelInfo packet; GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet); if (IsPixelEquivalent(image,p,&packet) == MagickFalse) break; } AssociateAlphaPixel(image,cube_info,p,&pixel); index=MaxTreeDepth-1; bisect=((double) QuantumRange+1.0)/2.0; mid=midpoint; node_info=cube_info->root; for (level=1; level <= cube_info->depth; level++) { double distance; bisect*=0.5; id=ColorToNodeId(cube_info,&pixel,index); mid.red+=(id & 1) != 0 ? bisect : -bisect; mid.green+=(id & 2) != 0 ? 
bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'",
                  image->filename);
                continue;
              }
            if (level == cube_info->depth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.alpha*error.alpha);
        if (IsNaN(distance) != 0)
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel(pixel.alpha);
      else
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel((MagickRealType) OpaqueAlpha);
      p+=count*GetPixelChannels(image);
    }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if (cube_info->quantize_info->colorspace != image->colorspace)
    if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
        (cube_info->quantize_info->colorspace != CMYKColorspace))
      (void) TransformImageColorspace((Image *) image,sRGBColorspace,
        exception);
  return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l o n e Q u a n t i z e I n f o                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneQuantizeInfo() makes a duplicate of the given quantize info structure,
%  or if quantize info is NULL, a new one.
%
%  The format of the CloneQuantizeInfo method is:
%
%      QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o clone_info: Method CloneQuantizeInfo returns a duplicate of the given
%      quantize info, or if quantize info is NULL a new one.
%
%    o quantize_info: a structure of type QuantizeInfo.
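%
%  A minimal usage sketch (illustrative only; adjust fields as needed):
%
%      clone_info=CloneQuantizeInfo((QuantizeInfo *) NULL);
%      clone_info->number_colors=16;
%      ...
%      clone_info=DestroyQuantizeInfo(clone_info);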
%
*/
MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
{
  QuantizeInfo
    *clone_info;

  clone_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetQuantizeInfo(clone_info);
  if (quantize_info == (QuantizeInfo *) NULL)
    return(clone_info);
  clone_info->number_colors=quantize_info->number_colors;
  clone_info->tree_depth=quantize_info->tree_depth;
  clone_info->dither_method=quantize_info->dither_method;
  clone_info->colorspace=quantize_info->colorspace;
  clone_info->measure_error=quantize_info->measure_error;
  return(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C l o s e s t C o l o r                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClosestColor() traverses the color cube tree at a particular node and
%  determines which colormap entry best represents the input color.
%
%  The format of the ClosestColor method is:
%
%      void ClosestColor(const Image *image,CubeInfo *cube_info,
%        const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: the address of a structure of type NodeInfo which points to a
%      node in the color cube tree that is to be examined.
%
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  size_t
    number_children;

  ssize_t
    i;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      ClosestColor(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      double
        alpha,
        beta,
        distance,
        pixel;

      DoublePixelPacket
        *magick_restrict q;

      PixelInfo
        *magick_restrict p;

      /*
        Determine if this color is "closest".
      */
      p=image->colormap+node_info->color_number;
      q=(&cube_info->target);
      alpha=1.0;
      beta=1.0;
      if (cube_info->associate_alpha != MagickFalse)
        {
          alpha=(MagickRealType) (QuantumScale*p->alpha);
          beta=(MagickRealType) (QuantumScale*q->alpha);
        }
      pixel=alpha*p->red-beta*q->red;
      distance=pixel*pixel;
      if (distance <= cube_info->distance)
        {
          pixel=alpha*p->green-beta*q->green;
          distance+=pixel*pixel;
          if (distance <= cube_info->distance)
            {
              pixel=alpha*p->blue-beta*q->blue;
              distance+=pixel*pixel;
              if (distance <= cube_info->distance)
                {
                  if (cube_info->associate_alpha != MagickFalse)
                    {
                      pixel=p->alpha-q->alpha;
                      distance+=pixel*pixel;
                    }
                  if (distance <= cube_info->distance)
                    {
                      cube_info->distance=distance;
                      cube_info->color_number=node_info->color_number;
                    }
                }
            }
        }
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C o m p r e s s I m a g e C o l o r m a p                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CompressImageColormap() compresses an image colormap by removing any
%  duplicate or unused color entries.
%
%  The format of the CompressImageColormap method is:
%
%      MagickBooleanType CompressImageColormap(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
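%
%  CompressImageColormap() returns MagickFalse when the image is not
%  palette-based, so a typical (illustrative) invocation checks the return
%  value:
%
%      if (CompressImageColormap(image,exception) == MagickFalse)
%        return(MagickFalse);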
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image,
  ExceptionInfo *exception)
{
  QuantizeInfo
    quantize_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsPaletteImage(image) == MagickFalse)
    return(MagickFalse);
  GetQuantizeInfo(&quantize_info);
  quantize_info.number_colors=image->colors;
  quantize_info.tree_depth=MaxTreeDepth;
  return(QuantizeImage(&quantize_info,image,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e f i n e I m a g e C o l o r m a p                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DefineImageColormap() traverses the color cube tree and notes each colormap
%  entry.  A colormap entry is any node in the color cube tree where the
%  number of unique colors is not zero.
%
%  The format of the DefineImageColormap method is:
%
%      void DefineImageColormap(Image *image,CubeInfo *cube_info,
%        NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: the address of a structure of type NodeInfo which points to a
%      node in the color cube tree from which a colormap entry is defined.
%
*/
static void DefineImageColormap(Image *image,CubeInfo *cube_info,
  NodeInfo *node_info)
{
  size_t
    number_children;

  ssize_t
    i;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      DefineImageColormap(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      double
        alpha;

      PixelInfo
        *magick_restrict q;

      /*
        Colormap entry is defined by the mean color in this cube.
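        The mean is total_color divided by number_unique; when alpha is
        associated the color sums are premultiplied, so below they are
        divided by the mean alpha (gamma) to recover the stored color.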
*/
      q=image->colormap+image->colors;
      alpha=(double) ((MagickOffsetType) node_info->number_unique);
      alpha=PerceptibleReciprocal(alpha);
      if (cube_info->associate_alpha == MagickFalse)
        {
          q->red=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.red);
          q->green=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.green);
          q->blue=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.blue);
          q->alpha=(double) OpaqueAlpha;
        }
      else
        {
          double
            opacity;

          opacity=(double) (alpha*QuantumRange*node_info->total_color.alpha);
          q->alpha=(double) ClampToQuantum(opacity);
          if (q->alpha == OpaqueAlpha)
            {
              q->red=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.blue);
            }
          else
            {
              double
                gamma;

              gamma=(double) (QuantumScale*q->alpha);
              gamma=PerceptibleReciprocal(gamma);
              q->red=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.blue);
              if (node_info->number_unique > cube_info->transparent_pixels)
                {
                  cube_info->transparent_pixels=node_info->number_unique;
                  cube_info->transparent_index=(ssize_t) image->colors;
                }
            }
        }
      node_info->color_number=image->colors++;
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y C u b e I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyCubeInfo() deallocates memory associated with the color cube tree.
%
%  The format of the DestroyCubeInfo method is:
%
%      DestroyCubeInfo(CubeInfo *cube_info)
%
%  A description of each parameter follows:
%
%    o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
  Nodes
    *nodes;

  /*
    Release color cube tree storage.
  */
  do
  {
    nodes=cube_info->node_queue->next;
    cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(
      cube_info->node_queue->nodes);
    cube_info->node_queue=(Nodes *) RelinquishMagickMemory(
      cube_info->node_queue);
    cube_info->node_queue=nodes;
  } while (cube_info->node_queue != (Nodes *) NULL);
  if (cube_info->memory_info != (MemoryInfo *) NULL)
    cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
  cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
  cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y Q u a n t i z e I n f o                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyQuantizeInfo() deallocates memory associated with a QuantizeInfo
%  structure.
%
%  The format of the DestroyQuantizeInfo method is:
%
%      QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to a QuantizeInfo structure.
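%
%  DestroyQuantizeInfo() pairs with AcquireQuantizeInfo() or
%  CloneQuantizeInfo(); an illustrative lifetime:
%
%      quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
%      ...
%      quantize_info=DestroyQuantizeInfo(quantize_info);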
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  quantize_info->signature=(~MagickCoreSignature);
  quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info);
  return(quantize_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D i t h e r I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DitherImage() distributes the difference between an original image and
%  the corresponding color-reduced image to neighboring pixels using
%  serpentine-scan Floyd-Steinberg error diffusion.  DitherImage returns
%  MagickTrue if the image is dithered, otherwise MagickFalse.
%
%  The format of the DitherImage method is:
%
%      MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels)
{
  ssize_t
    i;

  assert(pixels != (DoublePixelPacket **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (pixels[i] != (DoublePixelPacket *) NULL)
      pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]);
  pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels);
  return(pixels);
}

static DoublePixelPacket **AcquirePixelThreadSet(const size_t count)
{
  DoublePixelPacket
    **pixels;

  size_t
    number_threads;

  ssize_t
    i;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads,
    sizeof(*pixels));
  if (pixels == (DoublePixelPacket **) NULL)
    return((DoublePixelPacket **) NULL);
  (void) memset(pixels,0,number_threads*sizeof(*pixels));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixels[i]=(DoublePixelPacket *) AcquireQuantumMemory(count,2*
      sizeof(**pixels));
    if (pixels[i] == (DoublePixelPacket *) NULL)
      return(DestroyPixelThreadSet(pixels));
  }
  return(pixels);
}

static inline ssize_t CacheOffset(CubeInfo *cube_info,
  const DoublePixelPacket *pixel)
{
#define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift)))
#define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift)))
#define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift)))
#define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift)))

  ssize_t
    offset;

  offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) |
    GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) |
    BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue))));
  if (cube_info->associate_alpha != MagickFalse)
    offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->alpha)));
  return(offset);
}

static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
#define DitherImageTag  "Dither/Image"

  CacheView
    *image_view;

  DoublePixelPacket
    **pixels;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Distribute quantization error using Floyd-Steinberg.
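    The scan is serpentine: direction alternates each row (v = +/-1), and
    the residual error recorded for already-visited neighbors is gathered
    into the current pixel with the classic 7/16, 5/16, 3/16, 1/16 weights.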
*/ pixels=AcquirePixelThreadSet(image->columns); if (pixels == (DoublePixelPacket **) NULL) return(MagickFalse); status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); CubeInfo cube; DoublePixelPacket *current, *previous; Quantum *magick_restrict q; size_t index; ssize_t x, v; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } cube=(*cube_info); current=pixels[id]+(y & 0x01)*image->columns; previous=pixels[id]+((y+1) & 0x01)*image->columns; v=(ssize_t) ((y & 0x01) != 0 ? -1 : 1); for (x=0; x < (ssize_t) image->columns; x++) { DoublePixelPacket color, pixel; ssize_t i; ssize_t u; u=(y & 0x01) != 0 ? (ssize_t) image->columns-1-x : x; AssociateAlphaPixel(image,&cube,q+u*GetPixelChannels(image),&pixel); if (x > 0) { pixel.red+=7.0*cube_info->diffusion*current[u-v].red/16; pixel.green+=7.0*cube_info->diffusion*current[u-v].green/16; pixel.blue+=7.0*cube_info->diffusion*current[u-v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=7.0*cube_info->diffusion*current[u-v].alpha/16; } if (y > 0) { if (x < (ssize_t) (image->columns-1)) { pixel.red+=cube_info->diffusion*previous[u+v].red/16; pixel.green+=cube_info->diffusion*previous[u+v].green/16; pixel.blue+=cube_info->diffusion*previous[u+v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=cube_info->diffusion*previous[u+v].alpha/16; } pixel.red+=5.0*cube_info->diffusion*previous[u].red/16; pixel.green+=5.0*cube_info->diffusion*previous[u].green/16; pixel.blue+=5.0*cube_info->diffusion*previous[u].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=5.0*cube_info->diffusion*previous[u].alpha/16; if (x > 0) { pixel.red+=3.0*cube_info->diffusion*previous[u-v].red/16; pixel.green+=3.0*cube_info->diffusion*previous[u-v].green/16; pixel.blue+=3.0*cube_info->diffusion*previous[u-v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=3.0*cube_info->diffusion*previous[u-v].alpha/16; } } pixel.red=(double) ClampPixel(pixel.red); pixel.green=(double) ClampPixel(pixel.green); pixel.blue=(double) ClampPixel(pixel.blue); if (cube.associate_alpha != MagickFalse) pixel.alpha=(double) ClampPixel(pixel.alpha); i=CacheOffset(&cube,&pixel); if (cube.cache[i] < 0) { NodeInfo *node_info; size_t node_id; /* Identify the deepest node containing the pixel's color. */ node_info=cube.root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { node_id=ColorToNodeId(&cube,&pixel,index); if (node_info->child[node_id] == (NodeInfo *) NULL) break; node_info=node_info->child[node_id]; } /* Find closest color among siblings and their children. */ cube.target=pixel; cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+ 1.0); ClosestColor(image,&cube,node_info->parent); cube.cache[i]=(ssize_t) cube.color_number; } /* Assign pixel to closest colormap entry. 
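        The cube.cache[] entry set above memoizes the winning colormap index
        for each quantized RGB(A) key, so later pixels that fall in the same
        cache cell skip the tree search entirely.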
*/
      index=(size_t) cube.cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(image,(Quantum) index,q+u*GetPixelChannels(image));
      if (cube.quantize_info->measure_error == MagickFalse)
        {
          SetPixelRed(image,ClampToQuantum(image->colormap[index].red),
            q+u*GetPixelChannels(image));
          SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),
            q+u*GetPixelChannels(image));
          SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),
            q+u*GetPixelChannels(image));
          if (cube.associate_alpha != MagickFalse)
            SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),
              q+u*GetPixelChannels(image));
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
      /*
        Store the error.
      */
      AssociateAlphaPixelInfo(&cube,image->colormap+index,&color);
      current[u].red=pixel.red-color.red;
      current[u].green=pixel.green-color.green;
      current[u].blue=pixel.blue-color.blue;
      if (cube.associate_alpha != MagickFalse)
        current[u].alpha=pixel.alpha-color.alpha;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

          proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y,
            image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
  }
  image_view=DestroyCacheView(image_view);
  pixels=DestroyPixelThreadSet(pixels);
  return(status);
}

static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view,
  CubeInfo *cube_info,const unsigned int direction,ExceptionInfo *exception)
{
#define DitherImageTag  "Dither/Image"

  CubeInfo
    *p;

  DoublePixelPacket
    color,
    pixel;

  MagickBooleanType
    proceed;

  size_t
    index;

  p=cube_info;
  if ((p->x >= 0) && (p->x < (ssize_t) image->columns) &&
      (p->y >= 0) && (p->y < (ssize_t) image->rows))
    {
      Quantum
        *magick_restrict q;

      ssize_t
        i;

      /*
        Distribute error.
      */
      q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      AssociateAlphaPixel(image,cube_info,q,&pixel);
      for (i=0; i < ErrorQueueLength; i++)
      {
        pixel.red+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
          p->error[i].red;
        pixel.green+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
          p->error[i].green;
        pixel.blue+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
          p->error[i].blue;
        if (cube_info->associate_alpha != MagickFalse)
          pixel.alpha+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
            p->error[i].alpha;
      }
      pixel.red=(double) ClampPixel(pixel.red);
      pixel.green=(double) ClampPixel(pixel.green);
      pixel.blue=(double) ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        pixel.alpha=(double) ClampPixel(pixel.alpha);
      i=CacheOffset(cube_info,&pixel);
      if (p->cache[i] < 0)
        {
          NodeInfo
            *node_info;

          size_t
            id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=p->root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(cube_info,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          p->target=pixel;
          p->distance=(double) (4.0*(QuantumRange+1.0)*((double)
            QuantumRange+1.0)+1.0);
          ClosestColor(image,p,node_info->parent);
          p->cache[i]=(ssize_t) p->color_number;
        }
      /*
        Assign pixel to closest colormap entry.
*/ index=(size_t) p->cache[i]; if (image->storage_class == PseudoClass) SetPixelIndex(image,(Quantum) index,q); if (cube_info->quantize_info->measure_error == MagickFalse) { SetPixelRed(image,ClampToQuantum(image->colormap[index].red),q); SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),q); SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),q); if (cube_info->associate_alpha != MagickFalse) SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),q); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) return(MagickFalse); /* Propagate the error as the last entry of the error queue. */ (void) memmove(p->error,p->error+1,(ErrorQueueLength-1)* sizeof(p->error[0])); AssociateAlphaPixelInfo(cube_info,image->colormap+index,&color); p->error[ErrorQueueLength-1].red=pixel.red-color.red; p->error[ErrorQueueLength-1].green=pixel.green-color.green; p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue; if (cube_info->associate_alpha != MagickFalse) p->error[ErrorQueueLength-1].alpha=pixel.alpha-color.alpha; proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span); if (proceed == MagickFalse) return(MagickFalse); p->offset++; } switch (direction) { case WestGravity: p->x--; break; case EastGravity: p->x++; break; case NorthGravity: p->y--; break; case SouthGravity: p->y++; break; } return(MagickTrue); } static MagickBooleanType Riemersma(Image *image,CacheView *image_view, CubeInfo *cube_info,const size_t level,const unsigned int direction, ExceptionInfo *exception) { MagickBooleanType status; status=MagickTrue; if (level == 1) switch (direction) { case WestGravity: { status=RiemersmaDither(image,image_view,cube_info,EastGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,WestGravity, exception); break; } case EastGravity: { status=RiemersmaDither(image,image_view,cube_info,WestGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,EastGravity, exception); break; } case NorthGravity: { status=RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,EastGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); break; } case SouthGravity: { status=RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,WestGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); break; } default: break; } else switch (direction) { case WestGravity: { status=Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,EastGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,WestGravity, exception); if (status != MagickFalse) 
status=Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); break; } case EastGravity: { status=Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,WestGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,EastGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); break; } case NorthGravity: { status=Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,EastGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); break; } case SouthGravity: { status=Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,WestGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); if (status != MagickFalse) status=RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); if (status != MagickFalse) status=Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); break; } default: break; } return(status); } static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info, ExceptionInfo *exception) { CacheView *image_view; const char *artifact; MagickBooleanType status; size_t extent, level; artifact=GetImageArtifact(image,"dither:diffusion-amount"); if (artifact != (const char *) NULL) cube_info->diffusion=StringToDoubleInterval(artifact,1.0); if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod) return(FloydSteinbergDither(image,cube_info,exception)); /* Distribute quantization error along a Hilbert curve. 
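    Visiting pixels in Hilbert-curve order preserves spatial locality: the
    diffused error stays near the pixels that produced it.  The recursion
    level is chosen below so the curve spans max(columns,rows).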
*/
  (void) memset(cube_info->error,0,ErrorQueueLength*sizeof(*cube_info->error));
  cube_info->x=0;
  cube_info->y=0;
  extent=MagickMax(image->columns,image->rows);
  level=(size_t) log2((double) extent);
  if (((size_t) 1UL << level) < extent)
    level++;
  cube_info->offset=0;
  cube_info->span=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireAuthenticCacheView(image,exception);
  status=MagickTrue;
  if (level > 0)
    status=Riemersma(image,image_view,cube_info,level,NorthGravity,exception);
  if (status != MagickFalse)
    status=RiemersmaDither(image,image_view,cube_info,ForgetGravity,exception);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t C u b e I n f o                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetCubeInfo() initializes the Cube data structure.
%
%  The format of the GetCubeInfo method is:
%
%      CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
%        const size_t depth,const size_t maximum_colors)
%
%  A description of each parameter follows.
%
%    o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
%    o depth: Normally, this integer value is zero or one.  A zero or
%      one tells Quantize to choose an optimal tree depth of
%      Log4(number_colors).  A tree of this depth generally allows the best
%      representation of the reference image with the least amount of memory
%      and the fastest computational speed.  In some cases, such as an image
%      with low color dispersion (a small number of colors), a value other
%      than Log4(number_colors) is required.  To expand the color tree
%      completely, use a value of 8.
%
%    o maximum_colors: maximum colors.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
  const size_t depth,const size_t maximum_colors)
{
  CubeInfo
    *cube_info;

  double
    weight;

  size_t
    length;

  ssize_t
    i;

  /*
    Initialize tree to describe color cube.
  */
  cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
  if (cube_info == (CubeInfo *) NULL)
    return((CubeInfo *) NULL);
  (void) memset(cube_info,0,sizeof(*cube_info));
  cube_info->depth=depth;
  if (cube_info->depth > MaxTreeDepth)
    cube_info->depth=MaxTreeDepth;
  if (cube_info->depth < 2)
    cube_info->depth=2;
  cube_info->maximum_colors=maximum_colors;
  /*
    Initialize root node.
  */
  cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
  if (cube_info->root == (NodeInfo *) NULL)
    return((CubeInfo *) NULL);
  cube_info->root->parent=cube_info->root;
  cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
  if (cube_info->quantize_info->dither_method == NoDitherMethod)
    return(cube_info);
  /*
    Initialize dither resources.
  */
  length=(size_t) (1UL << (4*(8-CacheShift)));
  cube_info->memory_info=AcquireVirtualMemory(length,
    sizeof(*cube_info->cache));
  if (cube_info->memory_info == (MemoryInfo *) NULL)
    return((CubeInfo *) NULL);
  cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info);
  /*
    Initialize color cache.
  */
  (void) memset(cube_info->cache,(-1),sizeof(*cube_info->cache)*length);
  /*
    Distribute weights along a curve of exponential decay.
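    The i-th weight evaluates to ErrorRelativeWeight^(i/(ErrorQueueLength-1)),
    decaying from 1.0 for the first queue entry down to ErrorRelativeWeight
    for the last.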
*/
  weight=1.0;
  for (i=0; i < ErrorQueueLength; i++)
  {
    cube_info->weights[i]=PerceptibleReciprocal(weight);
    weight*=exp(log(1.0/ErrorRelativeWeight)/(ErrorQueueLength-1.0));
  }
  cube_info->diffusion=1.0;
  return(cube_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t N o d e I n f o                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetNodeInfo() allocates memory for a new node in the color cube tree and
%  presets all fields to zero.
%
%  The format of the GetNodeInfo method is:
%
%      NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
%        const size_t level,NodeInfo *parent)
%
%  A description of each parameter follows.
%
%    o node: The GetNodeInfo method returns a pointer to the newly allocated
%      node.
%
%    o id: Specifies the child number of the node.
%
%    o level: Specifies the level in the color cube tree at which the node
%      resides.
%
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
  const size_t level,NodeInfo *parent)
{
  NodeInfo
    *node_info;

  if (cube_info->free_nodes == 0)
    {
      Nodes
        *nodes;

      /*
        Allocate a new queue of nodes.
      */
      nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
      if (nodes == (Nodes *) NULL)
        return((NodeInfo *) NULL);
      nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
        sizeof(*nodes->nodes));
      if (nodes->nodes == (NodeInfo *) NULL)
        return((NodeInfo *) NULL);
      nodes->next=cube_info->node_queue;
      cube_info->node_queue=nodes;
      cube_info->next_node=nodes->nodes;
      cube_info->free_nodes=NodesInAList;
    }
  cube_info->nodes++;
  cube_info->free_nodes--;
  node_info=cube_info->next_node++;
  (void) memset(node_info,0,sizeof(*node_info));
  node_info->parent=parent;
  node_info->id=id;
  node_info->level=level;
  return(node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e Q u a n t i z e E r r o r                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageQuantizeError() measures the difference between the original
%  and quantized images.  This difference is the total quantization error.
%  The error is computed by summing over all pixels in an image the distance
%  squared in RGB space between each reference pixel value and its quantized
%  value.  These values are computed:
%
%    o mean_error_per_pixel: This value is the mean error for any single
%      pixel in the image.
%
%    o normalized_mean_square_error: This value is the normalized mean
%      quantization error for any single pixel in the image.  This distance
%      measure is normalized to a range between 0 and 1.  It is independent
%      of the range of red, green, and blue values in the image.
%
%    o normalized_maximum_square_error: This value is the normalized
%      maximum quantization error for any single pixel in the image.  This
%      distance measure is normalized to a range between 0 and 1.  It is
%      independent of the range of red, green, and blue values in your image.
%
%  The format of the GetImageQuantizeError method is:
%
%      MagickBooleanType GetImageQuantizeError(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
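%
%  An illustrative use reads the results from image->error after the call:
%
%      (void) GetImageQuantizeError(image,exception);
%      (void) FormatLocaleFile(stdout,"NMSE: %g\n",
%        image->error.normalized_mean_error);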
% */ MagickExport MagickBooleanType GetImageQuantizeError(Image *image, ExceptionInfo *exception) { CacheView *image_view; double alpha, area, beta, distance, maximum_error, mean_error, mean_error_per_pixel; ssize_t index, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); image->total_colors=GetNumberColors(image,(FILE *) NULL,exception); (void) memset(&image->error,0,sizeof(image->error)); if (image->storage_class == DirectClass) return(MagickTrue); alpha=1.0; beta=1.0; area=3.0*image->columns*image->rows; maximum_error=0.0; mean_error_per_pixel=0.0; mean_error=0.0; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { index=(ssize_t) GetPixelIndex(image,p); if (image->alpha_trait != UndefinedPixelTrait) { alpha=(double) (QuantumScale*GetPixelAlpha(image,p)); beta=(double) (QuantumScale*image->colormap[index].alpha); } distance=fabs((double) (alpha*GetPixelRed(image,p)-beta* image->colormap[index].red)); mean_error_per_pixel+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; distance=fabs((double) (alpha*GetPixelGreen(image,p)-beta* image->colormap[index].green)); mean_error_per_pixel+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; distance=fabs((double) (alpha*GetPixelBlue(image,p)-beta* image->colormap[index].blue)); mean_error_per_pixel+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); image->error.mean_error_per_pixel=(double) mean_error_per_pixel/area; image->error.normalized_mean_error=(double) QuantumScale*QuantumScale* mean_error/area; image->error.normalized_maximum_error=(double) QuantumScale*maximum_error; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t Q u a n t i z e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetQuantizeInfo() initializes the QuantizeInfo structure. % % The format of the GetQuantizeInfo method is: % % GetQuantizeInfo(QuantizeInfo *quantize_info) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to a QuantizeInfo structure. % */ MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info) { (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(quantize_info != (QuantizeInfo *) NULL); (void) memset(quantize_info,0,sizeof(*quantize_info)); quantize_info->number_colors=256; quantize_info->dither_method=RiemersmaDitherMethod; quantize_info->colorspace=UndefinedColorspace; quantize_info->measure_error=MagickFalse; quantize_info->signature=MagickCoreSignature; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % K m e a n s I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % KmeansImage() applies k-means color reduction to an image. This is a % colorspace clustering or segmentation technique. 
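%
%  For example (illustrative), KmeansImage(image,16,100,0.01,exception)
%  clusters the image into 16 colors, stopping after 100 iterations or when
%  the change in total distortion between passes is at most 0.01.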
% % The format of the KmeansImage method is: % % MagickBooleanType KmeansImage(Image *image,const size_t number_colors, % const size_t max_iterations,const double tolerance, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o number_colors: number of colors to use as seeds. % % o max_iterations: maximum number of iterations while converging. % % o tolerance: the maximum tolerance. % % o exception: return any errors or warnings in this structure. % */ typedef struct _KmeansInfo { double red, green, blue, alpha, black, count, distortion; } KmeansInfo; static KmeansInfo **DestroyKmeansThreadSet(KmeansInfo **kmeans_info) { ssize_t i; assert(kmeans_info != (KmeansInfo **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (kmeans_info[i] != (KmeansInfo *) NULL) kmeans_info[i]=(KmeansInfo *) RelinquishMagickMemory(kmeans_info[i]); kmeans_info=(KmeansInfo **) RelinquishMagickMemory(kmeans_info); return(kmeans_info); } static KmeansInfo **AcquireKmeansThreadSet(const size_t number_colors) { KmeansInfo **kmeans_info; ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); kmeans_info=(KmeansInfo **) AcquireQuantumMemory(number_threads, sizeof(*kmeans_info)); if (kmeans_info == (KmeansInfo **) NULL) return((KmeansInfo **) NULL); (void) memset(kmeans_info,0,number_threads*sizeof(*kmeans_info)); for (i=0; i < (ssize_t) number_threads; i++) { kmeans_info[i]=(KmeansInfo *) AcquireQuantumMemory(number_colors, sizeof(**kmeans_info)); if (kmeans_info[i] == (KmeansInfo *) NULL) return(DestroyKmeansThreadSet(kmeans_info)); } return(kmeans_info); } static inline double KmeansMetric(const Image *magick_restrict image, const Quantum *magick_restrict p,const PixelInfo *magick_restrict q) { double gamma, metric, pixel; gamma=1.0; metric=0.0; if ((image->alpha_trait != UndefinedPixelTrait) || (q->alpha_trait != UndefinedPixelTrait)) { pixel=GetPixelAlpha(image,p)-(q->alpha_trait != UndefinedPixelTrait ? 
q->alpha : OpaqueAlpha); metric+=pixel*pixel; if (image->alpha_trait != UndefinedPixelTrait) gamma*=QuantumScale*GetPixelAlpha(image,p); if (q->alpha_trait != UndefinedPixelTrait) gamma*=QuantumScale*q->alpha; } if (image->colorspace == CMYKColorspace) { pixel=QuantumScale*(GetPixelBlack(image,p)-q->black); metric+=gamma*pixel*pixel; gamma*=QuantumScale*(QuantumRange-GetPixelBlack(image,p)); gamma*=QuantumScale*(QuantumRange-q->black); } metric*=3.0; pixel=QuantumScale*(GetPixelRed(image,p)-q->red); if (IsHueCompatibleColorspace(image->colorspace) != MagickFalse) { if (fabs((double) pixel) > 0.5) pixel-=0.5; pixel*=2.0; } metric+=gamma*pixel*pixel; pixel=QuantumScale*(GetPixelGreen(image,p)-q->green); metric+=gamma*pixel*pixel; pixel=QuantumScale*(GetPixelBlue(image,p)-q->blue); metric+=gamma*pixel*pixel; return(metric); } MagickExport MagickBooleanType KmeansImage(Image *image, const size_t number_colors,const size_t max_iterations,const double tolerance, ExceptionInfo *exception) { #define KmeansImageTag "Kmeans/Image" #define RandomColorComponent(info) (QuantumRange*GetPseudoRandomValue(info)) CacheView *image_view; const char *colors; double previous_tolerance; KmeansInfo **kmeans_pixels; MagickBooleanType verbose, status; ssize_t n; size_t number_threads; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); colors=GetImageArtifact(image,"kmeans:seed-colors"); if (colors == (const char *) NULL) { CubeInfo *cube_info; QuantizeInfo *quantize_info; size_t depth; /* Seed clusters from color quantization. */ quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL); quantize_info->colorspace=image->colorspace; quantize_info->number_colors=number_colors; quantize_info->dither_method=NoDitherMethod; n=number_colors; for (depth=1; n != 0; depth++) n>>=2; cube_info=GetCubeInfo(quantize_info,depth,number_colors); if (cube_info == (CubeInfo *) NULL) { quantize_info=DestroyQuantizeInfo(quantize_info); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } status=ClassifyImageColors(cube_info,image,exception); if (status != MagickFalse) { if (cube_info->colors > cube_info->maximum_colors) ReduceImageColors(image,cube_info); status=SetImageColormap(image,cube_info,exception); } DestroyCubeInfo(cube_info); quantize_info=DestroyQuantizeInfo(quantize_info); if (status == MagickFalse) return(status); } else { char color[MagickPathExtent]; const char *p; /* Seed clusters from color list (e.g. red;green;blue). */ status=AcquireImageColormap(image,number_colors,exception); if (status == MagickFalse) return(status); for (n=0, p=colors; n < (ssize_t) image->colors; n++) { const char *q; for (q=p; *q != '\0'; q++) if (*q == ';') break; (void) CopyMagickString(color,p,(size_t) MagickMin(q-p+1, MagickPathExtent)); (void) QueryColorCompliance(color,AllCompliance,image->colormap+n, exception); if (*q == '\0') { n++; break; } p=q+1; } if (n < (ssize_t) image->colors) { RandomInfo *random_info; /* Seed clusters from random values. 
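          Any colormap slots not covered by the seed-color list are filled
          with uniformly distributed random channel values.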
*/ random_info=AcquireRandomInfo(); for ( ; n < (ssize_t) image->colors; n++) { (void) QueryColorCompliance("#000",AllCompliance,image->colormap+n, exception); image->colormap[n].red=RandomColorComponent(random_info); image->colormap[n].green=RandomColorComponent(random_info); image->colormap[n].blue=RandomColorComponent(random_info); if (image->alpha_trait != UndefinedPixelTrait) image->colormap[n].alpha=RandomColorComponent(random_info); if (image->colorspace == CMYKColorspace) image->colormap[n].black=RandomColorComponent(random_info); } random_info=DestroyRandomInfo(random_info); } } /* Iterative refinement. */ kmeans_pixels=AcquireKmeansThreadSet(number_colors); if (kmeans_pixels == (KmeansInfo **) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); previous_tolerance=0.0; verbose=IsStringTrue(GetImageArtifact(image,"debug")); number_threads=(size_t) GetMagickResourceLimit(ThreadResource); image_view=AcquireAuthenticCacheView(image,exception); for (n=0; n < (ssize_t) max_iterations; n++) { double distortion; ssize_t j, y; for (j=0; j < (ssize_t) number_threads; j++) (void) memset(kmeans_pixels[j],0,image->colors*sizeof(*kmeans_pixels[j])); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double min_distance; ssize_t i, k; /* Assign each pixel whose mean has the least squared color distance. */ k=0; min_distance=KmeansMetric(image,q,image->colormap+0); for (i=1; i < (ssize_t) image->colors; i++) { double distance; if (min_distance <= MagickEpsilon) break; distance=KmeansMetric(image,q,image->colormap+i); if (distance < min_distance) { min_distance=distance; k=i; } } kmeans_pixels[id][k].red+=QuantumScale*GetPixelRed(image,q); kmeans_pixels[id][k].green+=QuantumScale*GetPixelGreen(image,q); kmeans_pixels[id][k].blue+=QuantumScale*GetPixelBlue(image,q); if (image->alpha_trait != UndefinedPixelTrait) kmeans_pixels[id][k].alpha+=QuantumScale*GetPixelAlpha(image,q); if (image->colorspace == CMYKColorspace) kmeans_pixels[id][k].black+=QuantumScale*GetPixelBlack(image,q); kmeans_pixels[id][k].count++; kmeans_pixels[id][k].distortion+=min_distance; SetPixelIndex(image,(Quantum) k,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } if (status == MagickFalse) break; /* Reduce sums to [0] entry. */ for (j=1; j < (ssize_t) number_threads; j++) { ssize_t k; for (k=0; k < (ssize_t) image->colors; k++) { kmeans_pixels[0][k].red+=kmeans_pixels[j][k].red; kmeans_pixels[0][k].green+=kmeans_pixels[j][k].green; kmeans_pixels[0][k].blue+=kmeans_pixels[j][k].blue; if (image->alpha_trait != UndefinedPixelTrait) kmeans_pixels[0][k].alpha+=kmeans_pixels[j][k].alpha; if (image->colorspace == CMYKColorspace) kmeans_pixels[0][k].black+=kmeans_pixels[j][k].black; kmeans_pixels[0][k].count+=kmeans_pixels[j][k].count; kmeans_pixels[0][k].distortion+=kmeans_pixels[j][k].distortion; } } /* Calculate the new means (centroids) of the pixels in the new clusters. 
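      Each channel mean is its accumulated sum divided by the cluster count;
      PerceptibleReciprocal() keeps the division well-defined when a cluster
      is empty.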
*/ distortion=0.0; for (j=0; j < (ssize_t) image->colors; j++) { double gamma; gamma=PerceptibleReciprocal((double) kmeans_pixels[0][j].count); image->colormap[j].red=gamma*QuantumRange*kmeans_pixels[0][j].red; image->colormap[j].green=gamma*QuantumRange*kmeans_pixels[0][j].green; image->colormap[j].blue=gamma*QuantumRange*kmeans_pixels[0][j].blue; if (image->alpha_trait != UndefinedPixelTrait) image->colormap[j].alpha=gamma*QuantumRange*kmeans_pixels[0][j].alpha; if (image->colorspace == CMYKColorspace) image->colormap[j].black=gamma*QuantumRange*kmeans_pixels[0][j].black; distortion+=kmeans_pixels[0][j].distortion; } if (verbose != MagickFalse) (void) FormatLocaleFile(stderr,"distortion[%.20g]: %*g %*g\n",(double) n, GetMagickPrecision(),distortion,GetMagickPrecision(), fabs(distortion-previous_tolerance)); if (fabs(distortion-previous_tolerance) <= tolerance) break; previous_tolerance=distortion; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,KmeansImageTag,(MagickOffsetType) n, max_iterations); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); kmeans_pixels=DestroyKmeansThreadSet(kmeans_pixels); if (image->progress_monitor != (MagickProgressMonitor) NULL) (void) SetImageProgress(image,KmeansImageTag,(MagickOffsetType) max_iterations-1,max_iterations); if (status == MagickFalse) return(status); return(SyncImage(image,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P o s t e r i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PosterizeImage() reduces the image to a limited number of colors for a % "poster" effect. % % The format of the PosterizeImage method is: % % MagickBooleanType PosterizeImage(Image *image,const size_t levels, % const DitherMethod dither_method,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: Specifies a pointer to an Image structure. % % o levels: Number of color levels allowed in each channel. Very low values % (2, 3, or 4) have the most visible effect. % % o dither_method: choose from UndefinedDitherMethod, NoDitherMethod, % RiemersmaDitherMethod, FloydSteinbergDitherMethod. % % o exception: return any errors or warnings in this structure. % */ static inline double MagickRound(double x) { /* Round the fraction to nearest integer. 
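    For example, MagickRound(1.49) returns 1.0 while MagickRound(1.5)
    returns 2.0; exact ties round up.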
*/ if ((x-floor(x)) < (ceil(x)-x)) return(floor(x)); return(ceil(x)); } MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels, const DitherMethod dither_method,ExceptionInfo *exception) { #define PosterizeImageTag "Posterize/Image" #define PosterizePixel(pixel) ClampToQuantum((MagickRealType) QuantumRange*( \ MagickRound(QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1)) CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; QuantizeInfo *quantize_info; ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (image->storage_class == PseudoClass) #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->colors,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { /* Posterize colormap. */ if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(double) PosterizePixel(image->colormap[i].red); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(double) PosterizePixel(image->colormap[i].green); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(double) PosterizePixel(image->colormap[i].blue); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(double) PosterizePixel(image->colormap[i].alpha); } /* Posterize image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) SetPixelRed(image,PosterizePixel(GetPixelRed(image,q)),q); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) SetPixelGreen(image,PosterizePixel(GetPixelGreen(image,q)),q); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) SetPixelBlue(image,PosterizePixel(GetPixelBlue(image,q)),q); if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) SetPixelBlack(image,PosterizePixel(GetPixelBlack(image,q)),q); if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) SetPixelAlpha(image,PosterizePixel(GetPixelAlpha(image,q)),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,PosterizeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL); quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels* levels,MaxColormapSize+1); quantize_info->dither_method=dither_method; 
quantize_info->tree_depth=MaxTreeDepth;
  status=QuantizeImage(quantize_info,image,exception);
  quantize_info=DestroyQuantizeInfo(quantize_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P r u n e C h i l d                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneChild() deletes the given node and merges its statistics into its
%  parent.
%
%  The format of the PruneChild method is:
%
%      PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
{
  NodeInfo
    *parent;

  size_t
    number_children;

  ssize_t
    i;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneChild(cube_info,node_info->child[i]);
  if (cube_info->nodes > cube_info->maximum_colors)
    {
      /*
        Merge color statistics into parent.
      */
      parent=node_info->parent;
      parent->number_unique+=node_info->number_unique;
      parent->total_color.red+=node_info->total_color.red;
      parent->total_color.green+=node_info->total_color.green;
      parent->total_color.blue+=node_info->total_color.blue;
      parent->total_color.alpha+=node_info->total_color.alpha;
      parent->child[node_info->id]=(NodeInfo *) NULL;
      cube_info->nodes--;
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P r u n e L e v e l                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneLevel() deletes all nodes at the bottom level of the color tree,
%  merging their color statistics into their parent node.
%
%  The format of the PruneLevel method is:
%
%      PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
{
  size_t
    number_children;

  ssize_t
    i;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneLevel(cube_info,node_info->child[i]);
  if (node_info->level == cube_info->depth)
    PruneChild(cube_info,node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P r u n e T o C u b e D e p t h                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneToCubeDepth() deletes any nodes at a depth greater than
%  cube_info->depth while merging their color statistics into their parent
%  node.
%
%  The format of the PruneToCubeDepth method is:
%
%      PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
{
  size_t
    number_children;

  ssize_t
    i;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ?
8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) PruneToCubeDepth(cube_info,node_info->child[i]); if (node_info->level > cube_info->depth) PruneChild(cube_info,node_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % Q u a n t i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QuantizeImage() analyzes the colors within a reference image and chooses a % fixed number of colors to represent the image. The goal of the algorithm % is to minimize the color difference between the input and output image while % minimizing the processing time. % % The format of the QuantizeImage method is: % % MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info, % Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info, Image *image,ExceptionInfo *exception) { CubeInfo *cube_info; ImageType type; MagickBooleanType status; size_t depth, maximum_colors; assert(quantize_info != (const QuantizeInfo *) NULL); assert(quantize_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); maximum_colors=quantize_info->number_colors; if (maximum_colors == 0) maximum_colors=MaxColormapSize; if (maximum_colors > MaxColormapSize) maximum_colors=MaxColormapSize; type=IdentifyImageType(image,exception); if (IsGrayImageType(type) != MagickFalse) (void) SetGrayscaleImage(image,exception); depth=quantize_info->tree_depth; if (depth == 0) { size_t colors; /* Depth of color tree is: Log4(colormap size)+2. */ colors=maximum_colors; for (depth=1; colors != 0; depth++) colors>>=2; if ((quantize_info->dither_method != NoDitherMethod) && (depth > 2)) depth--; if ((image->alpha_trait != UndefinedPixelTrait) && (depth > 5)) depth--; if (IsGrayImageType(type) != MagickFalse) depth=MaxTreeDepth; } /* Initialize color cube. */ cube_info=GetCubeInfo(quantize_info,depth,maximum_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,image,exception); if (status != MagickFalse) { /* Reduce the number of colors in the image. */ if (cube_info->colors > cube_info->maximum_colors) ReduceImageColors(image,cube_info); status=AssignImageColors(image,cube_info,exception); } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % Q u a n t i z e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QuantizeImages() analyzes the colors within a set of reference images and % chooses a fixed number of colors to represent the set. The goal of the % algorithm is to minimize the color difference between the input and output % images while minimizing the processing time. 
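%
%  This is useful, for example, for computing a single colormap shared by
%  every frame of an animation sequence.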
% % The format of the QuantizeImages method is: % % MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info, % Image *images,ExceptionInfo *exception) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o images: Specifies a pointer to a list of Image structures. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info, Image *images,ExceptionInfo *exception) { CubeInfo *cube_info; Image *image; MagickBooleanType proceed, status; MagickProgressMonitor progress_monitor; size_t depth, maximum_colors, number_images; ssize_t i; assert(quantize_info != (const QuantizeInfo *) NULL); assert(quantize_info->signature == MagickCoreSignature); assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (GetNextImageInList(images) == (Image *) NULL) { /* Handle a single image with QuantizeImage. */ status=QuantizeImage(quantize_info,images,exception); return(status); } status=MagickFalse; maximum_colors=quantize_info->number_colors; if (maximum_colors == 0) maximum_colors=MaxColormapSize; if (maximum_colors > MaxColormapSize) maximum_colors=MaxColormapSize; depth=quantize_info->tree_depth; if (depth == 0) { size_t colors; /* Depth of color tree is: Log4(colormap size)+2. */ colors=maximum_colors; for (depth=1; colors != 0; depth++) colors>>=2; if (quantize_info->dither_method != NoDitherMethod) depth--; } /* Initialize color cube. */ cube_info=GetCubeInfo(quantize_info,depth,maximum_colors); if (cube_info == (CubeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename); return(MagickFalse); } number_images=GetImageListLength(images); image=images; for (i=0; image != (Image *) NULL; i++) { progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL, image->client_data); status=ClassifyImageColors(cube_info,image,exception); if (status == MagickFalse) break; (void) SetImageProgressMonitor(image,progress_monitor,image->client_data); proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i, number_images); if (proceed == MagickFalse) break; image=GetNextImageInList(image); } if (status != MagickFalse) { /* Reduce the number of colors in an image sequence. */ ReduceImageColors(images,cube_info); image=images; for (i=0; image != (Image *) NULL; i++) { progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,image->client_data); status=AssignImageColors(image,cube_info,exception); if (status == MagickFalse) break; (void) SetImageProgressMonitor(image,progress_monitor, image->client_data); proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i, number_images); if (proceed == MagickFalse) break; image=GetNextImageInList(image); } } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + Q u a n t i z e E r r o r F l a t t e n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QuantizeErrorFlatten() traverses the color cube and flattens the quantization % error into a sorted 1D array. This accelerates the color reduction process. 
% % Contributed by Yoya. % % The format of the QuantizeErrorFlatten method is: % % size_t QuantizeErrorFlatten(const CubeInfo *cube_info, % const NodeInfo *node_info,const ssize_t offset, % double *quantize_error) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is current pointer. % % o offset: quantize error offset. % % o quantize_error: the quantization error vector. % */ static size_t QuantizeErrorFlatten(const CubeInfo *cube_info, const NodeInfo *node_info,const ssize_t offset,double *quantize_error) { size_t n, number_children; ssize_t i; if (offset >= (ssize_t) cube_info->nodes) return(0); quantize_error[offset]=node_info->quantize_error; n=1; number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children ; i++) if (node_info->child[i] != (NodeInfo *) NULL) n+=QuantizeErrorFlatten(cube_info,node_info->child[i],offset+n, quantize_error); return(n); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e d u c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Reduce() traverses the color cube tree and prunes any node whose % quantization error falls below a particular threshold. % % The format of the Reduce method is: % % Reduce(CubeInfo *cube_info,const NodeInfo *node_info) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info) { size_t number_children; ssize_t i; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) Reduce(cube_info,node_info->child[i]); if (node_info->quantize_error <= cube_info->pruning_threshold) PruneChild(cube_info,node_info); else { /* Find minimum pruning threshold. */ if (node_info->number_unique > 0) cube_info->colors++; if (node_info->quantize_error < cube_info->next_threshold) cube_info->next_threshold=node_info->quantize_error; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e d u c e I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReduceImageColors() repeatedly prunes the tree until the number of nodes % with n2 > 0 is less than or equal to the maximum number of colors allowed % in the output image. On any given iteration over the tree, it selects % those nodes whose E value is minimal for pruning and merges their % color statistics upward. It uses a pruning threshold, Ep, to govern % node selection as follows: % % Ep = 0 % while number of nodes with (n2 > 0) > required maximum number of colors % prune all nodes such that E <= Ep % Set Ep to minimum E in remaining nodes % % This has the effect of minimizing any quantization error when merging % two nodes together. % % When a node to be pruned has offspring, the pruning procedure invokes % itself recursively in order to prune the tree from the leaves upward. % n2, Sr, Sg, and Sb in a node being pruned are always added to the % corresponding data in that node's parent. This retains the pruned % node's color characteristics for later averaging. 
% % For each node, n2 pixels exist for which that node represents the % smallest volume in RGB space containing those pixel's colors. When n2 % > 0 the node will uniquely define a color in the output image. At the % beginning of reduction, n2 = 0 for all nodes except a the leaves of % the tree which represent colors present in the input image. % % The other pixel count, n1, indicates the total number of colors % within the cubic volume which the node represents. This includes n1 - % n2 pixels whose colors should be defined by nodes at a lower level in % the tree. % % The format of the ReduceImageColors method is: % % ReduceImageColors(const Image *image,CubeInfo *cube_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % */ static int QuantizeErrorCompare(const void *error_p,const void *error_q) { double *p, *q; p=(double *) error_p; q=(double *) error_q; if (*p > *q) return(1); if (fabs(*q-*p) <= MagickEpsilon) return(0); return(-1); } static void ReduceImageColors(const Image *image,CubeInfo *cube_info) { #define ReduceImageTag "Reduce/Image" MagickBooleanType proceed; MagickOffsetType offset; size_t span; cube_info->next_threshold=0.0; if (cube_info->colors > cube_info->maximum_colors) { double *quantize_error; /* Enable rapid reduction of the number of unique colors. */ quantize_error=(double *) AcquireQuantumMemory(cube_info->nodes, sizeof(*quantize_error)); if (quantize_error != (double *) NULL) { (void) QuantizeErrorFlatten(cube_info,cube_info->root,0, quantize_error); qsort(quantize_error,cube_info->nodes,sizeof(double), QuantizeErrorCompare); if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100)) cube_info->next_threshold=quantize_error[cube_info->nodes-110* (cube_info->maximum_colors+1)/100]; quantize_error=(double *) RelinquishMagickMemory(quantize_error); } } for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; ) { cube_info->pruning_threshold=cube_info->next_threshold; cube_info->next_threshold=cube_info->root->quantize_error-1; cube_info->colors=0; Reduce(cube_info,cube_info->root); offset=(MagickOffsetType) span-cube_info->colors; proceed=SetImageProgress(image,ReduceImageTag,offset,span- cube_info->maximum_colors+1); if (proceed == MagickFalse) break; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m a p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemapImage() replaces the colors of an image with the closest of the colors % from the reference image. % % The format of the RemapImage method is: % % MagickBooleanType RemapImage(const QuantizeInfo *quantize_info, % Image *image,const Image *remap_image,ExceptionInfo *exception) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o image: the image. % % o remap_image: the reference image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info, Image *image,const Image *remap_image,ExceptionInfo *exception) { CubeInfo *cube_info; MagickBooleanType status; /* Initialize color cube. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(remap_image != (Image *) NULL); assert(remap_image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); cube_info=GetCubeInfo(quantize_info,MaxTreeDepth, quantize_info->number_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,remap_image,exception); if (status != MagickFalse) { /* Classify image colors from the reference image. */ cube_info->quantize_info->number_colors=cube_info->colors; status=AssignImageColors(image,cube_info,exception); } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m a p I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemapImages() replaces the colors of a sequence of images with the % closest color from a reference image. % % The format of the RemapImage method is: % % MagickBooleanType RemapImages(const QuantizeInfo *quantize_info, % Image *images,Image *remap_image,ExceptionInfo *exception) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o images: the image sequence. % % o remap_image: the reference image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info, Image *images,const Image *remap_image,ExceptionInfo *exception) { CubeInfo *cube_info; Image *image; MagickBooleanType status; assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=images; if (remap_image == (Image *) NULL) { /* Create a global colormap for an image sequence. */ status=QuantizeImages(quantize_info,images,exception); return(status); } /* Classify image colors from the reference image. */ cube_info=GetCubeInfo(quantize_info,MaxTreeDepth, quantize_info->number_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,remap_image,exception); if (status != MagickFalse) { /* Classify image colors from the reference image. */ cube_info->quantize_info->number_colors=cube_info->colors; image=images; for ( ; image != (Image *) NULL; image=GetNextImageInList(image)) { status=AssignImageColors(image,cube_info,exception); if (status == MagickFalse) break; } } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t G r a y s c a l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetGrayscaleImage() converts an image to a PseudoClass grayscale image. % % The format of the SetGrayscaleImage method is: % % MagickBooleanType SetGrayscaleImage(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image. 
% % o exception: return any errors or warnings in this structure. % */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int IntensityCompare(const void *x,const void *y) { double intensity; PixelInfo *color_1, *color_2; color_1=(PixelInfo *) x; color_2=(PixelInfo *) y; intensity=GetPixelInfoIntensity((const Image *) NULL,color_1)- GetPixelInfoIntensity((const Image *) NULL,color_2); if (intensity < (double) INT_MIN) intensity=(double) INT_MIN; if (intensity > (double) INT_MAX) intensity=(double) INT_MAX; return((int) intensity); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static MagickBooleanType SetGrayscaleImage(Image *image, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; PixelInfo *colormap; size_t extent; ssize_t *colormap_index, i, j, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->type != GrayscaleType) (void) TransformImageColorspace(image,GRAYColorspace,exception); extent=MagickMax(image->colors+1,MagickMax(MaxColormapSize,MaxMap+1)); colormap_index=(ssize_t *) AcquireQuantumMemory(extent, sizeof(*colormap_index)); if (colormap_index == (ssize_t *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); if (image->storage_class != PseudoClass) { (void) memset(colormap_index,(-1),extent*sizeof(*colormap_index)); if (AcquireImageColormap(image,MaxColormapSize,exception) == MagickFalse) { colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } image->colors=0; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { size_t intensity; intensity=ScaleQuantumToMap(GetPixelRed(image,q)); if (colormap_index[intensity] < 0) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SetGrayscaleImage) #endif if (colormap_index[intensity] < 0) { colormap_index[intensity]=(ssize_t) image->colors; image->colormap[image->colors].red=(double) GetPixelRed(image,q); image->colormap[image->colors].green=(double) GetPixelGreen(image,q); image->colormap[image->colors].blue=(double) GetPixelBlue(image,q); image->colors++; } } SetPixelIndex(image,(Quantum) colormap_index[intensity],q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); } (void) memset(colormap_index,0,extent*sizeof(*colormap_index)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].alpha=(double) i; qsort((void *) image->colormap,image->colors,sizeof(PixelInfo), IntensityCompare); colormap=(PixelInfo *) AcquireQuantumMemory(image->colors,sizeof(*colormap)); if (colormap == (PixelInfo *) NULL) { colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } j=0; colormap[j]=image->colormap[0]; for (i=0; i < (ssize_t) image->colors; i++) { if (IsPixelInfoEquivalent(&colormap[j],&image->colormap[i]) == 
MagickFalse)
      {
        j++;
        colormap[j]=image->colormap[i];
      }
    colormap_index[(ssize_t) image->colormap[i].alpha]=j;
  }
  image->colors=(size_t) (j+1);
  image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap);
  image->colormap=colormap;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelIndex(image,(Quantum) colormap_index[ScaleQuantumToMap(
        GetPixelIndex(image,q))],q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
  image->type=GrayscaleType;
  if (SetImageMonochrome(image,exception) != MagickFalse)
    image->type=BilevelType;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S e t   I m a g e   C o l o r m a p                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageColormap() traverses the color cube tree and sets the colormap of
%  the image.  A colormap entry is any node in the color cube tree where the
%  number of unique colors is not zero.
%
%  The format of the SetImageColormap method is:
%
%      MagickBooleanType SetImageColormap(Image *image,CubeInfo *cube_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickBooleanType SetImageColormap(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
  size_t
    number_colors;

  number_colors=MagickMax(cube_info->maximum_colors,cube_info->colors);
  if (AcquireImageColormap(image,number_colors,exception) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  image->colors=0;
  DefineImageColormap(image,cube_info,cube_info->root);
  if (image->colors != number_colors)
    {
      image->colormap=(PixelInfo *) ResizeQuantumMemory(image->colormap,
        image->colors+1,sizeof(*image->colormap));
      if (image->colormap == (PixelInfo *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  return(MagickTrue);
}
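/*
  Editor's note: the sketch below is illustrative and is not part of
  quantize.c.  It shows how the quantization entry points above are
  typically driven from application code: acquire a QuantizeInfo, set the
  target palette size and dither method, then call QuantizeImage().  The
  file names "input.png" and "output.png" and the 16-color target are
  hypothetical, error handling is reduced to the bare minimum, and the
  block is compiled out so it cannot affect this translation unit.
*/
#if 0
#include <MagickCore/MagickCore.h>

int main(int argc,char **argv)
{
  ExceptionInfo
    *exception;

  Image
    *image;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  QuantizeInfo
    *quantize_info;

  (void) argc;
  MagickCoreGenesis(argv[0],MagickFalse);
  exception=AcquireExceptionInfo();
  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,"input.png",MagickPathExtent);
  image=ReadImage(image_info,exception);
  if (image != (Image *) NULL)
    {
      /*
        Reduce the image to at most 16 colors with Floyd-Steinberg dither.
      */
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->number_colors=16;
      quantize_info->dither_method=FloydSteinbergDitherMethod;
      status=QuantizeImage(quantize_info,image,exception);
      if (status != MagickFalse)
        (void) WriteImages(image_info,image,"output.png",exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      image=DestroyImageList(image);
    }
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}
#endif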
matrix_mul_par.c
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <omp.h>

/* Simple blocked matrix multiplication example: C = A*B, where A is N x M,
   B is M x K, and C is N x K, all stored row-major in flat arrays. */

/* serial matrix multiplication */
void matrix_mult(double const * A, double const * B, double * C, int const N, int const M, int const K) {
  int BS2 = 64;
  int BS1 = 64;
  // int indices throughout (the original mixed size_t and int, which draws
  // signed/unsigned comparison warnings against the int bounds M and K)
  for (int ii = 0; ii < N; ii++) {
    for (int jj = 0; jj < K; jj++) {
      //C[i][j] = 0;
      C[K * ii + jj] = 0;
    }
  }
  for (int l_block = 0; l_block < M; l_block = l_block + (BS1)) {
    for (int j_block = 0; j_block < K; j_block = j_block + (BS2)) {
      for (int i = 0; i < N; i++) {
        int l_limit = l_block + BS1;
        if (l_limit > M)
          l_limit = M;
        for (int l = l_block; l < l_limit; l++) {
          int j_limit = j_block + BS2;
          if (j_limit > K)
            j_limit = K;
          for (int j = j_block; j < j_limit; j++) {
            //C[i][j] += A[i][l]*B[l][j];
            C[K * i + j] = C[K * i + j] + (A[M * i + l] * B[K * l + j]);
          }
        }
      }
    }
  }
}

/* OpenMP specialization for N=512, M=256, K=512: A and B hold 131072
   elements each, C holds 262144.  Blocks of the shared dimension M are
   distributed across threads; per-thread partial products are combined
   with an OpenMP 4.5 array-section reduction on C. */
void matrix_mult_call_specialization_0(double A[131072], double B[131072], double C[262144], int const N, int const M, int const K) {
  int BS2 = 64;
  int BS1 = 64;
  #pragma omp parallel for
  for (int ii = 0; ii < N; ii++) {
    for (int jj = 0; jj < K; jj++) {
      //C[i][j] = 0;
      C[K * ii + jj] = 0;
    }
  }
  #pragma omp parallel for default(shared) firstprivate(A, B, M, BS1, K, BS2, N) reduction (+:C[:262144])
  for (int l_block = 0; l_block < M; l_block = l_block + (BS1)) {
    for (int j_block = 0; j_block < K; j_block = j_block + (BS2)) {
      for (int i = 0; i < N; i++) {
        int l_limit = l_block + BS1;
        if (l_limit > M)
          l_limit = M;
        for (int l = l_block; l < l_limit; l++) {
          int j_limit = j_block + BS2;
          if (j_limit > K)
            j_limit = K;
          for (int j = j_block; j < j_limit; j++) {
            //C[i][j] += A[i][l]*B[l][j];
            C[K * i + j] = C[K * i + j] + (A[M * i + l] * B[K * l + j]);
          }
        }
      }
    }
  }
}

/*
 * Set an N by M matrix A to random values
 */
void init_matrix(double * A, int const N, int const M) {
  for (int i = 0; i < N; ++i) {
    for (int j = 0; j < M; ++j) {
      //A[i][j] = ((double) rand()) / (double) RAND_MAX;
      // 32767 replaces RAND_MAX for repeatable results across platforms;
      // entries may exceed 1.0 where RAND_MAX is larger, which is harmless
      // for this benchmark.
      A[M * i + j] = ((double) rand()) / (double) 32767;
    }
  }
}

/* Sum all entries of an N by K matrix and print the total. */
void print_matrix_result(double * A, int const N, int const K) {
  double acc = 0.0;
  for (int i = 0; i < N; ++i) {
    for (int j = 0; j < K; ++j) {
      //acc += A[i][j];
      acc = acc + (A[K * i + j]);
    }
  }
  printf("Result acc: %f\n", acc);
}

void test_matrix_mul() {
  int N = 512;
  int M = 256;
  int K = 512;
  //double A[N][M];
  //double B[M][K];
  //double C[N][K];
  double * A = (double *) malloc(N * M * sizeof(double));
  double * B = (double *) malloc(M * K * sizeof(double));
  double * C = (double *) malloc(N * K * sizeof(double));
  // initialize matrices
  init_matrix(A, N, M);
  init_matrix(B, M, K);
  // do: C = A*B
  matrix_mult_call_specialization_0(A, B, C, N, M, K);
  print_matrix_result(C, N, K);
  free(A);
  free(B);
  free(C);
}

int main() {
  // To make results repeatable
  srand(0);
  test_matrix_mul();
  return 0;
}
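/*
  Editor's note: an optional verification/timing harness, not part of the
  original file.  It runs the serial matrix_mult and the OpenMP
  matrix_mult_call_specialization_0 on the same inputs, times both with
  omp_get_wtime(), and reports the maximum absolute difference between the
  two results.  The function and its name, verify_matrix_mul, are our own
  addition; it is unused unless called from main().
*/
void verify_matrix_mul() {
  int N = 512;
  int M = 256;
  int K = 512;
  double * A = (double *) malloc(N * M * sizeof(double));
  double * B = (double *) malloc(M * K * sizeof(double));
  double * C_serial = (double *) malloc(N * K * sizeof(double));
  double * C_parallel = (double *) malloc(N * K * sizeof(double));

  init_matrix(A, N, M);
  init_matrix(B, M, K);

  double t0 = omp_get_wtime();
  matrix_mult(A, B, C_serial, N, M, K);
  double t1 = omp_get_wtime();
  printf("serial:   %.3f s\n", t1 - t0);

  t0 = omp_get_wtime();
  matrix_mult_call_specialization_0(A, B, C_parallel, N, M, K);
  t1 = omp_get_wtime();
  printf("parallel: %.3f s\n", t1 - t0);

  // The array-section reduction reassociates floating-point sums, so the
  // two results may differ by rounding error; compare with a tolerance in
  // mind rather than expecting bitwise equality.
  double max_diff = 0.0;
  for (int i = 0; i < N * K; i++) {
    double diff = fabs(C_serial[i] - C_parallel[i]);
    if (diff > max_diff)
      max_diff = diff;
  }
  printf("max |serial - parallel| = %g\n", max_diff);

  free(A);
  free(B);
  free(C_serial);
  free(C_parallel);
}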
image.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % IIIII M M AAA GGGG EEEEE % % I MM MM A A G E % % I M M M AAAAA G GG EEE % % I M M A A G G E % % IIIII M M A A GGGG EEEEE % % % % % % MagickCore Image Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/animate.h" #include "magick/artifact.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/compress.h" #include "magick/constitute.h" #include "magick/delegate.h" #include "magick/deprecate.h" #include "magick/display.h" #include "magick/draw.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/histogram.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/magic.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/memory-private.h" #include "magick/module.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/paint.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/profile.h" #include "magick/property.h" #include "magick/quantize.h" #include "magick/random_.h" #include "magick/resource_.h" #include "magick/segment.h" #include "magick/semaphore.h" #include "magick/signature-private.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/timer.h" #include "magick/token.h" #include "magick/token-private.h" #include "magick/utility.h" #include "magick/version.h" #include "magick/xwindow-private.h" /* Constant declaration. 
*/ const char BackgroundColor[] = "#ffffff", /* white */ BorderColor[] = "#dfdfdf", /* gray */ DefaultTileFrame[] = "15x15+3+3", DefaultTileGeometry[] = "120x120+4+3>", DefaultTileLabel[] = "%f\n%G\n%b", ForegroundColor[] = "#000", /* black */ LoadImageTag[] = "Load/Image", LoadImagesTag[] = "Load/Images", MatteColor[] = "#bdbdbd", /* gray */ PSDensityGeometry[] = "72.0x72.0", PSPageGeometry[] = "612x792", SaveImageTag[] = "Save/Image", SaveImagesTag[] = "Save/Images", TransparentColor[] = "#00000000"; /* transparent black */ const double DefaultResolution = 72.0; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireImage() returns a pointer to an image structure initialized to % default values. % % The format of the AcquireImage method is: % % Image *AcquireImage(const ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: Many of the image default values are set from this % structure. For example, filename, compression, depth, background color, % and others. % */ MagickExport Image *AcquireImage(const ImageInfo *image_info) { const char *option; Image *image; MagickStatusType flags; /* Allocate image structure. */ (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); image=(Image *) AcquireCriticalMemory(sizeof(*image)); (void) memset(image,0,sizeof(*image)); /* Initialize Image structure. */ (void) CopyMagickString(image->magick,"MIFF",MaxTextExtent); image->storage_class=DirectClass; image->depth=MAGICKCORE_QUANTUM_DEPTH; image->colorspace=sRGBColorspace; image->rendering_intent=PerceptualIntent; image->gamma=1.000f/2.200f; image->chromaticity.red_primary.x=0.6400f; image->chromaticity.red_primary.y=0.3300f; image->chromaticity.red_primary.z=0.0300f; image->chromaticity.green_primary.x=0.3000f; image->chromaticity.green_primary.y=0.6000f; image->chromaticity.green_primary.z=0.1000f; image->chromaticity.blue_primary.x=0.1500f; image->chromaticity.blue_primary.y=0.0600f; image->chromaticity.blue_primary.z=0.7900f; image->chromaticity.white_point.x=0.3127f; image->chromaticity.white_point.y=0.3290f; image->chromaticity.white_point.z=0.3583f; image->interlace=NoInterlace; image->ticks_per_second=UndefinedTicksPerSecond; image->compose=OverCompositeOp; image->blur=1.0; InitializeExceptionInfo(&image->exception); (void) QueryColorDatabase(BackgroundColor,&image->background_color, &image->exception); (void) QueryColorDatabase(BorderColor,&image->border_color,&image->exception); (void) QueryColorDatabase(MatteColor,&image->matte_color,&image->exception); (void) QueryColorDatabase(TransparentColor,&image->transparent_color, &image->exception); GetTimerInfo(&image->timer); image->ping=MagickFalse; image->cache=AcquirePixelCache(0); image->blob=CloneBlobInfo((BlobInfo *) NULL); image->timestamp=time((time_t *) NULL); image->debug=IsEventLogging(); image->reference_count=1; image->semaphore=AllocateSemaphoreInfo(); image->signature=MagickCoreSignature; if (image_info == (ImageInfo *) NULL) return(image); /* Transfer image info. */ SetBlobExempt(image,image_info->file != (FILE *) NULL ? 
MagickTrue : MagickFalse); (void) CopyMagickString(image->filename,image_info->filename,MaxTextExtent); (void) CopyMagickString(image->magick_filename,image_info->filename, MaxTextExtent); (void) CopyMagickString(image->magick,image_info->magick,MaxTextExtent); if (image_info->size != (char *) NULL) { (void) ParseAbsoluteGeometry(image_info->size,&image->extract_info); image->columns=image->extract_info.width; image->rows=image->extract_info.height; image->offset=image->extract_info.x; image->extract_info.x=0; image->extract_info.y=0; } if (image_info->extract != (char *) NULL) { RectangleInfo geometry; (void) memset(&geometry,0,sizeof(geometry)); flags=ParseAbsoluteGeometry(image_info->extract,&geometry); if (((flags & XValue) != 0) || ((flags & YValue) != 0)) { image->extract_info=geometry; Swap(image->columns,image->extract_info.width); Swap(image->rows,image->extract_info.height); } } image->compression=image_info->compression; image->quality=image_info->quality; image->endian=image_info->endian; image->interlace=image_info->interlace; image->units=image_info->units; if (image_info->density != (char *) NULL) { GeometryInfo geometry_info; flags=ParseGeometry(image_info->density,&geometry_info); image->x_resolution=geometry_info.rho; image->y_resolution=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->y_resolution=image->x_resolution; } if (image_info->page != (char *) NULL) { char *geometry; image->page=image->extract_info; geometry=GetPageGeometry(image_info->page); (void) ParseAbsoluteGeometry(geometry,&image->page); geometry=DestroyString(geometry); } if (image_info->depth != 0) image->depth=image_info->depth; image->dither=image_info->dither; image->background_color=image_info->background_color; image->border_color=image_info->border_color; image->matte_color=image_info->matte_color; image->transparent_color=image_info->transparent_color; image->ping=image_info->ping; image->progress_monitor=image_info->progress_monitor; image->client_data=image_info->client_data; if (image_info->cache != (void *) NULL) ClonePixelCacheMethods(image->cache,image_info->cache); (void) SyncImageSettings(image_info,image); option=GetImageOption(image_info,"delay"); if (option != (const char *) NULL) { GeometryInfo geometry_info; flags=ParseGeometry(option,&geometry_info); if ((flags & GreaterValue) != 0) { if (image->delay > (size_t) floor(geometry_info.rho+0.5)) image->delay=(size_t) floor(geometry_info.rho+0.5); } else if ((flags & LessValue) != 0) { if (image->delay < (size_t) floor(geometry_info.rho+0.5)) image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5); } else image->delay=(size_t) floor(geometry_info.rho+0.5); if ((flags & SigmaValue) != 0) image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5); } option=GetImageOption(image_info,"dispose"); if (option != (const char *) NULL) image->dispose=(DisposeType) ParseCommandOption(MagickDisposeOptions, MagickFalse,option); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireImageInfo() allocates the ImageInfo structure. 
% % The format of the AcquireImageInfo method is: % % ImageInfo *AcquireImageInfo(void) % */ MagickExport ImageInfo *AcquireImageInfo(void) { ImageInfo *image_info; image_info=(ImageInfo *) AcquireMagickMemory(sizeof(*image_info)); if (image_info == (ImageInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); GetImageInfo(image_info); return(image_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e N e x t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireNextImage() initializes the next image in a sequence to % default values. The next member of image points to the newly allocated % image. If there is a memory shortage, next is assigned NULL. % % The format of the AcquireNextImage method is: % % void AcquireNextImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows: % % o image_info: Many of the image default values are set from this % structure. For example, filename, compression, depth, background color, % and others. % % o image: the image. % */ MagickExport void AcquireNextImage(const ImageInfo *image_info,Image *image) { /* Allocate image structure. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); image->next=AcquireImage(image_info); if (GetNextImageInList(image) == (Image *) NULL) return; (void) CopyMagickString(GetNextImageInList(image)->filename,image->filename, MaxTextExtent); if (image_info != (ImageInfo *) NULL) (void) CopyMagickString(GetNextImageInList(image)->filename, image_info->filename,MaxTextExtent); DestroyBlob(GetNextImageInList(image)); image->next->blob=ReferenceBlob(image->blob); image->next->endian=image->endian; image->next->scene=image->scene+1; image->next->previous=image; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A p p e n d I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AppendImages() takes all images from the current image pointer to the end % of the image list and appends them to each other top-to-bottom if the % stack parameter is true, otherwise left-to-right. % % The current gravity setting now effects how the image is justified in the % final image. % % The format of the AppendImages method is: % % Image *AppendImages(const Image *images,const MagickBooleanType stack, % ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o stack: A value other than 0 stacks the images top-to-bottom. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AppendImages(const Image *images, const MagickBooleanType stack,ExceptionInfo *exception) { #define AppendImageTag "Append/Image" CacheView *append_view; Image *append_image; MagickBooleanType homogeneous_colorspace, matte, status; MagickOffsetType n; RectangleInfo geometry; register const Image *next; size_t depth, height, number_images, width; ssize_t x_offset, y, y_offset; /* Compute maximum area of appended area. 
*/ assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); matte=images->matte; number_images=1; width=images->columns; height=images->rows; depth=images->depth; homogeneous_colorspace=MagickTrue; next=GetNextImageInList(images); for ( ; next != (Image *) NULL; next=GetNextImageInList(next)) { if (next->depth > depth) depth=next->depth; if (next->colorspace != images->colorspace) homogeneous_colorspace=MagickFalse; if (next->matte != MagickFalse) matte=MagickTrue; number_images++; if (stack != MagickFalse) { if (next->columns > width) width=next->columns; height+=next->rows; continue; } width+=next->columns; if (next->rows > height) height=next->rows; } /* Append images. */ append_image=CloneImage(images,width,height,MagickTrue,exception); if (append_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(append_image,DirectClass) == MagickFalse) { InheritException(exception,&append_image->exception); append_image=DestroyImage(append_image); return((Image *) NULL); } if (homogeneous_colorspace == MagickFalse) (void) SetImageColorspace(append_image,sRGBColorspace); append_image->depth=depth; append_image->matte=matte; append_image->page=images->page; (void) SetImageBackgroundColor(append_image); status=MagickTrue; x_offset=0; y_offset=0; next=images; append_view=AcquireAuthenticCacheView(append_image,exception); for (n=0; n < (MagickOffsetType) number_images; n++) { CacheView *image_view; MagickBooleanType proceed; SetGeometry(append_image,&geometry); GravityAdjustGeometry(next->columns,next->rows,next->gravity,&geometry); if (stack != MagickFalse) x_offset-=geometry.x; else y_offset-=geometry.y; image_view=AcquireVirtualCacheView(next,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(next,next,next->rows,1) #endif for (y=0; y < (ssize_t) next->rows; y++) { MagickBooleanType sync; register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict append_indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); q=QueueCacheViewAuthenticPixels(append_view,x_offset,y+y_offset, next->columns,1,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); append_indexes=GetCacheViewAuthenticIndexQueue(append_view); for (x=0; x < (ssize_t) next->columns; x++) { SetPixelRed(q,GetPixelRed(p)); SetPixelGreen(q,GetPixelGreen(p)); SetPixelBlue(q,GetPixelBlue(p)); SetPixelOpacity(q,OpaqueOpacity); if (next->matte != MagickFalse) SetPixelOpacity(q,GetPixelOpacity(p)); if ((next->colorspace == CMYKColorspace) && (append_image->colorspace == CMYKColorspace)) SetPixelIndex(append_indexes+x,GetPixelIndex(indexes+x)); p++; q++; } sync=SyncCacheViewAuthenticPixels(append_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (stack == MagickFalse) { x_offset+=(ssize_t) next->columns; y_offset=0; } else { x_offset=0; y_offset+=(ssize_t) next->rows; } proceed=SetImageProgress(append_image,AppendImageTag,n,number_images); if (proceed == 
MagickFalse) break; next=GetNextImageInList(next); } append_view=DestroyCacheView(append_view); if (status == MagickFalse) append_image=DestroyImage(append_image); return(append_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C a t c h I m a g e E x c e p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CatchImageException() returns if no exceptions are found in the image % sequence, otherwise it determines the most severe exception and reports % it as a warning or error depending on the severity. % % The format of the CatchImageException method is: % % ExceptionType CatchImageException(Image *image) % % A description of each parameter follows: % % o image: An image sequence. % */ MagickExport ExceptionType CatchImageException(Image *image) { ExceptionInfo *exception; ExceptionType severity; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); exception=AcquireExceptionInfo(); GetImageException(image,exception); CatchException(exception); severity=exception->severity; exception=DestroyExceptionInfo(exception); return(severity); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l i p I m a g e P a t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClipImagePath() sets the image clip mask based any clipping path information % if it exists. % % The format of the ClipImagePath method is: % % MagickBooleanType ClipImagePath(Image *image,const char *pathname, % const MagickBooleanType inside) % % A description of each parameter follows: % % o image: the image. % % o pathname: name of clipping path resource. If name is preceded by #, use % clipping path numbered by name. % % o inside: if non-zero, later operations take effect inside clipping path. % Otherwise later operations take effect outside clipping path. 
% */ MagickExport MagickBooleanType ClipImage(Image *image) { return(ClipImagePath(image,"#1",MagickTrue)); } MagickExport MagickBooleanType ClipImagePath(Image *image,const char *pathname, const MagickBooleanType inside) { #define ClipImagePathTag "ClipPath/Image" char *property; const char *value; Image *clip_mask; ImageInfo *image_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(pathname != NULL); property=AcquireString(pathname); (void) FormatLocaleString(property,MaxTextExtent,"8BIM:1999,2998:%s", pathname); value=GetImageProperty(image,property); property=DestroyString(property); if (value == (const char *) NULL) { ThrowFileException(&image->exception,OptionError,"NoClipPathDefined", image->filename); return(MagickFalse); } image_info=AcquireImageInfo(); (void) CopyMagickString(image_info->filename,image->filename,MaxTextExtent); (void) ConcatenateMagickString(image_info->filename,pathname,MaxTextExtent); clip_mask=BlobToImage(image_info,value,strlen(value),&image->exception); image_info=DestroyImageInfo(image_info); if (clip_mask == (Image *) NULL) return(MagickFalse); if (clip_mask->storage_class == PseudoClass) { (void) SyncImage(clip_mask); if (SetImageStorageClass(clip_mask,DirectClass) == MagickFalse) return(MagickFalse); } if (inside == MagickFalse) (void) NegateImage(clip_mask,MagickFalse); (void) FormatLocaleString(clip_mask->magick_filename,MaxTextExtent, "8BIM:1999,2998:%s\nPS",pathname); (void) SetImageClipMask(image,clip_mask); clip_mask=DestroyImage(clip_mask); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImage() copies an image and returns the copy as a new image object. % % If the specified columns and rows is 0, an exact copy of the image is % returned, otherwise the pixel data is undefined and must be initialized % with the QueueAuthenticPixels() and SyncAuthenticPixels() methods. On % failure, a NULL image is returned and exception describes the reason for the % failure. % % The format of the CloneImage method is: % % Image *CloneImage(const Image *image,const size_t columns, % const size_t rows,const MagickBooleanType orphan, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the cloned image. % % o rows: the number of rows in the cloned image. % % o detach: With a value other than 0, the cloned image is detached from % its parent I/O stream. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *CloneImage(const Image *image,const size_t columns, const size_t rows,const MagickBooleanType detach,ExceptionInfo *exception) { double scale; Image *clone_image; size_t length; /* Clone the image. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((image->columns == 0) || (image->rows == 0)) { (void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError, "NegativeOrZeroImageSize","`%s'",image->filename); return((Image *) NULL); } clone_image=(Image *) AcquireCriticalMemory(sizeof(*clone_image)); (void) memset(clone_image,0,sizeof(*clone_image)); clone_image->signature=MagickCoreSignature; clone_image->storage_class=image->storage_class; clone_image->channels=image->channels; clone_image->colorspace=image->colorspace; clone_image->matte=image->matte; clone_image->columns=image->columns; clone_image->rows=image->rows; clone_image->dither=image->dither; (void) CloneImageProfiles(clone_image,image); (void) CloneImageProperties(clone_image,image); (void) CloneImageArtifacts(clone_image,image); GetTimerInfo(&clone_image->timer); InitializeExceptionInfo(&clone_image->exception); InheritException(&clone_image->exception,&image->exception); if (image->ascii85 != (void *) NULL) Ascii85Initialize(clone_image); clone_image->magick_columns=image->magick_columns; clone_image->magick_rows=image->magick_rows; clone_image->type=image->type; (void) CopyMagickString(clone_image->magick_filename,image->magick_filename, MaxTextExtent); (void) CopyMagickString(clone_image->magick,image->magick,MaxTextExtent); (void) CopyMagickString(clone_image->filename,image->filename,MaxTextExtent); clone_image->progress_monitor=image->progress_monitor; clone_image->client_data=image->client_data; clone_image->reference_count=1; clone_image->next=image->next; clone_image->previous=image->previous; clone_image->list=NewImageList(); clone_image->clip_mask=NewImageList(); clone_image->mask=NewImageList(); if (detach == MagickFalse) clone_image->blob=ReferenceBlob(image->blob); else { clone_image->next=NewImageList(); clone_image->previous=NewImageList(); clone_image->blob=CloneBlobInfo((BlobInfo *) NULL); } clone_image->ping=image->ping; clone_image->debug=IsEventLogging(); clone_image->semaphore=AllocateSemaphoreInfo(); if (image->colormap != (PixelPacket *) NULL) { /* Allocate and copy the image colormap. 
*/ clone_image->colors=image->colors; length=(size_t) image->colors; clone_image->colormap=(PixelPacket *) AcquireQuantumMemory(length+1, sizeof(*clone_image->colormap)); if (clone_image->colormap == (PixelPacket *) NULL) { clone_image=DestroyImage(clone_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } (void) memcpy(clone_image->colormap,image->colormap,length* sizeof(*clone_image->colormap)); } if ((columns == 0) || (rows == 0)) { if (image->montage != (char *) NULL) (void) CloneString(&clone_image->montage,image->montage); if (image->directory != (char *) NULL) (void) CloneString(&clone_image->directory,image->directory); if (image->clip_mask != (Image *) NULL) clone_image->clip_mask=CloneImage(image->clip_mask,0,0,MagickTrue, exception); if (image->mask != (Image *) NULL) clone_image->mask=CloneImage(image->mask,0,0,MagickTrue,exception); clone_image->cache=ReferencePixelCache(image->cache); return(clone_image); } if ((columns == image->columns) && (rows == image->rows)) { if (image->clip_mask != (Image *) NULL) clone_image->clip_mask=CloneImage(image->clip_mask,0,0,MagickTrue, exception); if (image->mask != (Image *) NULL) clone_image->mask=CloneImage(image->mask,0,0,MagickTrue,exception); } scale=1.0; if (image->columns != 0) scale=(double) columns/(double) image->columns; clone_image->page.width=(size_t) floor(scale*image->page.width+0.5); clone_image->page.x=(ssize_t) ceil(scale*image->page.x-0.5); clone_image->tile_offset.x=(ssize_t) ceil(scale*image->tile_offset.x-0.5); scale=1.0; if (image->rows != 0) scale=(double) rows/(double) image->rows; clone_image->page.height=(size_t) floor(scale*image->page.height+0.5); clone_image->page.y=(ssize_t) ceil(scale*image->page.y-0.5); clone_image->tile_offset.y=(ssize_t) ceil(scale*image->tile_offset.y-0.5); clone_image->cache=ClonePixelCache(image->cache); if (SetImageExtent(clone_image,columns,rows) == MagickFalse) { InheritException(exception,&clone_image->exception); clone_image=DestroyImage(clone_image); } return(clone_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImageInfo() makes a copy of the given image info structure. If % NULL is specified, a new image info structure is created initialized to % default values. % % The format of the CloneImageInfo method is: % % ImageInfo *CloneImageInfo(const ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. 
% */ MagickExport ImageInfo *CloneImageInfo(const ImageInfo *image_info) { ImageInfo *clone_info; clone_info=AcquireImageInfo(); if (image_info == (ImageInfo *) NULL) return(clone_info); clone_info->compression=image_info->compression; clone_info->temporary=image_info->temporary; clone_info->adjoin=image_info->adjoin; clone_info->antialias=image_info->antialias; clone_info->scene=image_info->scene; clone_info->number_scenes=image_info->number_scenes; clone_info->depth=image_info->depth; if (image_info->size != (char *) NULL) (void) CloneString(&clone_info->size,image_info->size); if (image_info->extract != (char *) NULL) (void) CloneString(&clone_info->extract,image_info->extract); if (image_info->scenes != (char *) NULL) (void) CloneString(&clone_info->scenes,image_info->scenes); if (image_info->page != (char *) NULL) (void) CloneString(&clone_info->page,image_info->page); clone_info->interlace=image_info->interlace; clone_info->endian=image_info->endian; clone_info->units=image_info->units; clone_info->quality=image_info->quality; if (image_info->sampling_factor != (char *) NULL) (void) CloneString(&clone_info->sampling_factor, image_info->sampling_factor); if (image_info->server_name != (char *) NULL) (void) CloneString(&clone_info->server_name,image_info->server_name); if (image_info->font != (char *) NULL) (void) CloneString(&clone_info->font,image_info->font); if (image_info->texture != (char *) NULL) (void) CloneString(&clone_info->texture,image_info->texture); if (image_info->density != (char *) NULL) (void) CloneString(&clone_info->density,image_info->density); clone_info->pointsize=image_info->pointsize; clone_info->fuzz=image_info->fuzz; clone_info->pen=image_info->pen; clone_info->background_color=image_info->background_color; clone_info->border_color=image_info->border_color; clone_info->matte_color=image_info->matte_color; clone_info->transparent_color=image_info->transparent_color; clone_info->dither=image_info->dither; clone_info->monochrome=image_info->monochrome; clone_info->colors=image_info->colors; clone_info->colorspace=image_info->colorspace; clone_info->type=image_info->type; clone_info->orientation=image_info->orientation; clone_info->preview_type=image_info->preview_type; clone_info->group=image_info->group; clone_info->ping=image_info->ping; clone_info->verbose=image_info->verbose; if (image_info->view != (char *) NULL) (void) CloneString(&clone_info->view,image_info->view); if (image_info->authenticate != (char *) NULL) (void) CloneString(&clone_info->authenticate,image_info->authenticate); (void) CloneImageOptions(clone_info,image_info); clone_info->progress_monitor=image_info->progress_monitor; clone_info->client_data=image_info->client_data; clone_info->cache=image_info->cache; if (image_info->cache != (void *) NULL) clone_info->cache=ReferencePixelCache(image_info->cache); if (image_info->profile != (void *) NULL) clone_info->profile=(void *) CloneStringInfo((StringInfo *) image_info->profile); SetImageInfoFile(clone_info,image_info->file); SetImageInfoBlob(clone_info,image_info->blob,image_info->length); clone_info->stream=image_info->stream; clone_info->virtual_pixel_method=image_info->virtual_pixel_method; (void) CopyMagickString(clone_info->magick,image_info->magick,MaxTextExtent); (void) CopyMagickString(clone_info->unique,image_info->unique,MaxTextExtent); (void) CopyMagickString(clone_info->zero,image_info->zero,MaxTextExtent); (void) CopyMagickString(clone_info->filename,image_info->filename, MaxTextExtent); clone_info->subimage=image_info->scene; /* 
deprecated */ clone_info->subrange=image_info->number_scenes; /* deprecated */ clone_info->channel=image_info->channel; clone_info->debug=IsEventLogging(); clone_info->signature=image_info->signature; return(clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o p y I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CopyImagePixels() copies pixels from the source image as defined by the % geometry the destination image at the specified offset. % % The format of the CopyImagePixels method is: % % MagickBooleanType CopyImagePixels(Image *image,const Image *source_image, % const RectangleInfo *geometry,const OffsetInfo *offset, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the destination image. % % o source_image: the source image. % % o geometry: define the dimensions of the source pixel rectangle. % % o offset: define the offset in the destination image. % % o exception: return the highest severity exception. % */ MagickExport MagickBooleanType CopyImagePixels(Image *image, const Image *source_image,const RectangleInfo *geometry, const OffsetInfo *offset,ExceptionInfo *exception) { #define CopyImageTag "Copy/Image" CacheView *image_view, *source_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(source_image != (Image *) NULL); assert(geometry != (RectangleInfo *) NULL); assert(offset != (OffsetInfo *) NULL); if ((offset->x < 0) || (offset->y < 0) || ((ssize_t) (offset->x+geometry->width) > (ssize_t) image->columns) || ((ssize_t) (offset->y+geometry->height) > (ssize_t) image->rows)) ThrowBinaryException(OptionError,"GeometryDoesNotContainImage", image->filename); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); /* Copy image pixels. 
*/ status=MagickTrue; progress=0; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,image,geometry->height,1) #endif for (y=0; y < (ssize_t) geometry->height; y++) { register const IndexPacket *magick_restrict source_indexes; register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,geometry->x,y+geometry->y, geometry->width,1,exception); q=GetCacheViewAuthenticPixels(image_view,offset->x,y+offset->y, geometry->width,1,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } source_indexes=GetCacheViewVirtualIndexQueue(source_view); indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) geometry->width; x++) { *q=(*p); if (image->colorspace == CMYKColorspace) indexes[x]=source_indexes[x]; p++; q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,CopyImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); source_view=DestroyCacheView(source_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImage() dereferences an image, deallocating memory associated with % the image if the reference count becomes zero. % % The format of the DestroyImage method is: % % Image *DestroyImage(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport Image *DestroyImage(Image *image) { MagickBooleanType destroy; /* Dereference image. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); destroy=MagickFalse; LockSemaphoreInfo(image->semaphore); image->reference_count--; if (image->reference_count == 0) destroy=MagickTrue; UnlockSemaphoreInfo(image->semaphore); if (destroy == MagickFalse) return((Image *) NULL); /* Destroy image. 
*/ DestroyImagePixels(image); if (image->clip_mask != (Image *) NULL) image->clip_mask=DestroyImage(image->clip_mask); if (image->mask != (Image *) NULL) image->mask=DestroyImage(image->mask); if (image->montage != (char *) NULL) image->montage=DestroyString(image->montage); if (image->directory != (char *) NULL) image->directory=DestroyString(image->directory); if (image->colormap != (PixelPacket *) NULL) image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap); if (image->geometry != (char *) NULL) image->geometry=DestroyString(image->geometry); DestroyImageProfiles(image); DestroyImageProperties(image); DestroyImageArtifacts(image); if (image->ascii85 != (Ascii85Info*) NULL) image->ascii85=(Ascii85Info *) RelinquishMagickMemory(image->ascii85); DestroyBlob(image); (void) ClearExceptionInfo(&image->exception,MagickTrue); if (image->semaphore != (SemaphoreInfo *) NULL) DestroySemaphoreInfo(&image->semaphore); image->signature=(~MagickCoreSignature); image=(Image *) RelinquishMagickMemory(image); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImageInfo() deallocates memory associated with an ImageInfo % structure. % % The format of the DestroyImageInfo method is: % % ImageInfo *DestroyImageInfo(ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. % */ MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); if (image_info->size != (char *) NULL) image_info->size=DestroyString(image_info->size); if (image_info->extract != (char *) NULL) image_info->extract=DestroyString(image_info->extract); if (image_info->scenes != (char *) NULL) image_info->scenes=DestroyString(image_info->scenes); if (image_info->page != (char *) NULL) image_info->page=DestroyString(image_info->page); if (image_info->sampling_factor != (char *) NULL) image_info->sampling_factor=DestroyString( image_info->sampling_factor); if (image_info->server_name != (char *) NULL) image_info->server_name=DestroyString( image_info->server_name); if (image_info->font != (char *) NULL) image_info->font=DestroyString(image_info->font); if (image_info->texture != (char *) NULL) image_info->texture=DestroyString(image_info->texture); if (image_info->density != (char *) NULL) image_info->density=DestroyString(image_info->density); if (image_info->view != (char *) NULL) image_info->view=DestroyString(image_info->view); if (image_info->authenticate != (char *) NULL) image_info->authenticate=DestroyString( image_info->authenticate); DestroyImageOptions(image_info); if (image_info->cache != (void *) NULL) image_info->cache=DestroyPixelCache(image_info->cache); if (image_info->profile != (StringInfo *) NULL) image_info->profile=(void *) DestroyStringInfo((StringInfo *) image_info->profile); image_info->signature=(~MagickCoreSignature); image_info=(ImageInfo *) RelinquishMagickMemory(image_info); return(image_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D i s a s s o c i a t e I m a g e S t r e a m % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % 
DisassociateImageStream() disassociates the image stream.  It checks if the
%  blob of the specified image is referenced by other images.  If the
%  reference count is higher than 1, a new blob is assigned to the specified
%  image.
%
%  The format of the DisassociateImageStream method is:
%
%      void DisassociateImageStream(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void DisassociateImageStream(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  DisassociateBlob(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e C l i p M a s k                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageClipMask() returns the clip path associated with the image.
%
%  The format of the GetImageClipMask method is:
%
%      Image *GetImageClipMask(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport Image *GetImageClipMask(const Image *image,
  ExceptionInfo *exception)
{
  assert(image != (const Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (image->clip_mask == (Image *) NULL)
    return((Image *) NULL);
  return(CloneImage(image->clip_mask,0,0,MagickTrue,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e E x c e p t i o n                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageException() traverses an image sequence and returns any error more
%  severe than noted by the exception parameter.
%
%  The format of the GetImageException method is:
%
%      void GetImageException(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: Specifies a pointer to a list of one or more images.
%
%    o exception: return the highest severity exception.
%
*/
MagickExport void GetImageException(Image *image,ExceptionInfo *exception)
{
  register Image
    *next;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  for (next=image; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if (next->exception.severity == UndefinedException)
      continue;
    if (next->exception.severity > exception->severity)
      InheritException(exception,&next->exception);
    next->exception.severity=UndefinedException;
  }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e I n f o                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageInfo() initializes image_info to default values.
%
%  The format of the GetImageInfo method is:
%
%      void GetImageInfo(ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport void GetImageInfo(ImageInfo *image_info)
{
  char
    *synchronize;

  ExceptionInfo
    *exception;

  /*
    File and image dimension members.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info != (ImageInfo *) NULL);
  (void) memset(image_info,0,sizeof(*image_info));
  image_info->adjoin=MagickTrue;
  image_info->interlace=NoInterlace;
  image_info->channel=DefaultChannels;
  image_info->quality=UndefinedCompressionQuality;
  image_info->antialias=MagickTrue;
  image_info->dither=MagickTrue;
  synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (synchronize != (const char *) NULL)
    {
      image_info->synchronize=IsStringTrue(synchronize);
      synchronize=DestroyString(synchronize);
    }
  exception=AcquireExceptionInfo();
  (void) QueryColorDatabase(BackgroundColor,&image_info->background_color,
    exception);
  (void) QueryColorDatabase(BorderColor,&image_info->border_color,exception);
  (void) QueryColorDatabase(MatteColor,&image_info->matte_color,exception);
  (void) QueryColorDatabase(TransparentColor,&image_info->transparent_color,
    exception);
  exception=DestroyExceptionInfo(exception);
  image_info->debug=IsEventLogging();
  image_info->signature=MagickCoreSignature;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e I n f o F i l e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageInfoFile() returns the image info file member.
%
%  The format of the GetImageInfoFile method is:
%
%      FILE *GetImageInfoFile(const ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport FILE *GetImageInfoFile(const ImageInfo *image_info)
{
  return(image_info->file);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e M a s k                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageMask() returns the mask associated with the image.
%
%  The format of the GetImageMask method is:
%
%      Image *GetImageMask(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport Image *GetImageMask(const Image *image,ExceptionInfo *exception)
{
  assert(image != (const Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (image->mask == (Image *) NULL)
    return((Image *) NULL);
  return(CloneImage(image->mask,0,0,MagickTrue,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e C h a n n e l s                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageChannels() returns the number of pixel channels associated with the
%  specified image.
%
%  The format of the GetImageChannels method is:
%
%      size_t GetImageChannels(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport size_t GetImageChannels(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(image->channels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t I m a g e R e f e r e n c e C o u n t                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageReferenceCount() returns the image reference count.
%
%  The format of the GetImageReferenceCount method is:
%
%      ssize_t GetImageReferenceCount(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport ssize_t GetImageReferenceCount(Image *image)
{
  ssize_t
    reference_count;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  LockSemaphoreInfo(image->semaphore);
  reference_count=image->reference_count;
  UnlockSemaphoreInfo(image->semaphore);
  return(reference_count);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e V i r t u a l P i x e l M e t h o d                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageVirtualPixelMethod() gets the "virtual pixels" method for the
%  image.  A virtual pixel is any pixel access that is outside the boundaries
%  of the image cache.
%
%  The format of the GetImageVirtualPixelMethod() method is:
%
%      VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(GetPixelCacheVirtualMethod(image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I n t e r p r e t I m a g e F i l e n a m e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InterpretImageFilename() interprets embedded characters in an image
%  filename.  The filename length is returned.
%
%  The format of the InterpretImageFilename method is:
%
%      size_t InterpretImageFilename(const ImageInfo *image_info,Image *image,
%        const char *format,int value,char *filename)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o image: the image.
%
%    o format: A filename describing the format to use to write the numeric
%      argument.  Only the first numeric format identifier is replaced.
%
%    o value: Numeric value to substitute into format filename.
%
%    o filename: return the formatted filename in this character buffer.
%
*/
MagickExport size_t InterpretImageFilename(const ImageInfo *image_info,
  Image *image,const char *format,int value,char *filename)
{
  char
    *q;

  int
    c;

  MagickBooleanType
    canonical;

  register const char
    *p;

  ssize_t
    field_width,
    offset;

  canonical=MagickFalse;
  offset=0;
  (void) CopyMagickString(filename,format,MaxTextExtent);
  for (p=strchr(format,'%'); p != (char *) NULL; p=strchr(p+1,'%'))
  {
    q=(char *) p+1;
    if (*q == '%')
      {
        p=q+1;
        continue;
      }
    field_width=0;
    if (*q == '0')
      field_width=(ssize_t) strtol(q,&q,10);
    switch (*q)
    {
      case 'd':
      case 'o':
      case 'x':
      {
        q++;
        c=(*q);
        *q='\0';
        (void) FormatLocaleString(filename+(p-format-offset),(size_t)
          (MaxTextExtent-(p-format-offset)),p,value);
        offset+=(4-field_width);
        *q=c;
        (void) ConcatenateMagickString(filename,q,MaxTextExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      case '[':
      {
        char
          pattern[MaxTextExtent];

        const char
          *value;

        register char
          *r;

        register ssize_t
          i;

        ssize_t
          depth;

        /*
          Image option.
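          E.g. a "%[filename:label]" pattern in the format string is replaced
          below by the image property, artifact, or image_info option of that
          name; only "filename:" keys are honored here.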
        */
        if (strchr(p,']') == (char *) NULL)
          break;
        depth=1;
        r=q+1;
        for (i=0; (i < (MaxTextExtent-1L)) && (*r != '\0'); i++)
        {
          if (*r == '[')
            depth++;
          if (*r == ']')
            depth--;
          if (depth <= 0)
            break;
          pattern[i]=(*r++);
        }
        pattern[i]='\0';
        if (LocaleNCompare(pattern,"filename:",9) != 0)
          break;
        value=(const char *) NULL;
        if (image != (Image *) NULL)
          value=GetImageProperty(image,pattern);
        if ((value == (const char *) NULL) && (image != (Image *) NULL))
          value=GetImageArtifact(image,pattern);
        if ((value == (const char *) NULL) &&
            (image_info != (ImageInfo *) NULL))
          value=GetImageOption(image_info,pattern);
        if (value == (const char *) NULL)
          break;
        q--;
        c=(*q);
        *q='\0';
        (void) CopyMagickString(filename+(p-format-offset),value,(size_t)
          (MaxTextExtent-(p-format-offset)));
        offset+=strlen(pattern)-4;
        *q=c;
        (void) ConcatenateMagickString(filename,r+1,MaxTextExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      default:
        break;
    }
  }
  for (q=filename; *q != '\0'; q++)
    if ((*q == '%') && (*(q+1) == '%'))
      {
        (void) CopyMagickString(q,q+1,(size_t) (MaxTextExtent-(q-filename)));
        canonical=MagickTrue;
      }
  if (canonical == MagickFalse)
    (void) CopyMagickString(filename,format,MaxTextExtent);
  return(strlen(filename));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s H i g h D y n a m i c R a n g e I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsHighDynamicRangeImage() returns MagickTrue if any pixel component is
%  non-integer or exceeds the bounds of the quantum depth (e.g. for Q16,
%  0..65535).
%
%  The format of the IsHighDynamicRangeImage method is:
%
%      MagickBooleanType IsHighDynamicRangeImage(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
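%
%  A minimal usage sketch (illustrative only; "image" and "exception" are
%  assumed to be supplied by the caller):
%
%      if (IsHighDynamicRangeImage(image,exception) != MagickFalse)
%        (void) fprintf(stderr,"pixels exceed the quantum range\n");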
%
*/
MagickExport MagickBooleanType IsHighDynamicRangeImage(const Image *image,
  ExceptionInfo *exception)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  (void) image;
  (void) exception;
  return(MagickFalse);
#else
  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=MagickTrue;
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register const IndexPacket
      *indexes;

    register const PixelPacket
      *p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      if ((pixel.red < 0.0) || (pixel.red > QuantumRange) ||
          (pixel.red != (QuantumAny) pixel.red))
        break;
      if ((pixel.green < 0.0) || (pixel.green > QuantumRange) ||
          (pixel.green != (QuantumAny) pixel.green))
        break;
      if ((pixel.blue < 0.0) || (pixel.blue > QuantumRange) ||
          (pixel.blue != (QuantumAny) pixel.blue))
        break;
      if (pixel.matte != MagickFalse)
        {
          if ((pixel.opacity < 0.0) || (pixel.opacity > QuantumRange) ||
              (pixel.opacity != (QuantumAny) pixel.opacity))
            break;
        }
      if (pixel.colorspace == CMYKColorspace)
        {
          if ((pixel.index < 0.0) || (pixel.index > QuantumRange) ||
              (pixel.index != (QuantumAny) pixel.index))
            break;
        }
      p++;
    }
    if (x < (ssize_t) image->columns)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status != MagickFalse ? MagickFalse : MagickTrue);
#endif
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s I m a g e O b j e c t                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImageObject() returns MagickTrue if the image sequence contains a valid
%  set of image objects.
%
%  The format of the IsImageObject method is:
%
%      MagickBooleanType IsImageObject(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType IsImageObject(const Image *image)
{
  register const Image
    *p;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
    if (p->signature != MagickCoreSignature)
      return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s T a i n t I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsTaintImage() returns MagickTrue if any pixel in the image has been
%  altered since it was first constituted.
%
%  The format of the IsTaintImage method is:
%
%      MagickBooleanType IsTaintImage(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
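%
%  A minimal usage sketch (illustrative only):
%
%      if (IsTaintImage(image) != MagickFalse)
%        (void) fprintf(stderr,"image was modified after it was read\n");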
%
*/
MagickExport MagickBooleanType IsTaintImage(const Image *image)
{
  char
    magick[MaxTextExtent],
    filename[MaxTextExtent];

  register const Image
    *p;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  (void) CopyMagickString(magick,image->magick,MaxTextExtent);
  (void) CopyMagickString(filename,image->filename,MaxTextExtent);
  for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
  {
    if (p->taint != MagickFalse)
      return(MagickTrue);
    if (LocaleCompare(p->magick,magick) != 0)
      return(MagickTrue);
    if (LocaleCompare(p->filename,filename) != 0)
      return(MagickTrue);
  }
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M o d i f y I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ModifyImage() ensures that there is only a single reference to the image
%  to be modified, updating the provided image pointer to point to a clone of
%  the original image if necessary.
%
%  The format of the ModifyImage method is:
%
%      MagickBooleanType ModifyImage(Image **image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ModifyImage(Image **image,
  ExceptionInfo *exception)
{
  Image
    *clone_image;

  assert(image != (Image **) NULL);
  assert(*image != (Image *) NULL);
  assert((*image)->signature == MagickCoreSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      (*image)->filename);
  if (GetImageReferenceCount(*image) <= 1)
    return(MagickTrue);
  clone_image=CloneImage(*image,0,0,MagickTrue,exception);
  LockSemaphoreInfo((*image)->semaphore);
  (*image)->reference_count--;
  UnlockSemaphoreInfo((*image)->semaphore);
  *image=clone_image;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   N e w M a g i c k I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewMagickImage() creates a blank image canvas of the specified size and
%  background color.
%
%  The format of the NewMagickImage method is:
%
%      Image *NewMagickImage(const ImageInfo *image_info,const size_t width,
%        const size_t height,const MagickPixelPacket *background)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o width: the image width.
%
%    o height: the image height.
%
%    o background: the image color.
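%
%  A minimal usage sketch (illustrative only; the background pixel is
%  initialized with GetMagickPixelPacket() and QueryMagickColor()):
%
%      MagickPixelPacket
%        background;
%
%      GetMagickPixelPacket((const Image *) NULL,&background);
%      (void) QueryMagickColor("white",&background,exception);
%      canvas=NewMagickImage(image_info,640,480,&background);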
% */ MagickExport Image *NewMagickImage(const ImageInfo *image_info, const size_t width,const size_t height,const MagickPixelPacket *background) { CacheView *image_view; ExceptionInfo *exception; Image *image; ssize_t y; MagickBooleanType status; assert(image_info != (const ImageInfo *) NULL); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image_info->signature == MagickCoreSignature); assert(background != (const MagickPixelPacket *) NULL); image=AcquireImage(image_info); image->columns=width; image->rows=height; image->colorspace=background->colorspace; image->matte=background->matte; image->fuzz=background->fuzz; image->depth=background->depth; status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { SetPixelPacket(image,background,q,indexes+x); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (status == MagickFalse) image=DestroyImage(image); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e f e r e n c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReferenceImage() increments the reference count associated with an image % returning a pointer to the image. % % The format of the ReferenceImage method is: % % Image *ReferenceImage(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport Image *ReferenceImage(Image *image) { assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); LockSemaphoreInfo(image->semaphore); image->reference_count++; UnlockSemaphoreInfo(image->semaphore); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s e t I m a g e P a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetImagePage() resets the image page canvas and position. % % The format of the ResetImagePage method is: % % MagickBooleanType ResetImagePage(Image *image,const char *page) % % A description of each parameter follows: % % o image: the image. % % o page: the relative page specification. 
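%
%  A minimal usage sketch (illustrative only; this mirrors the command-line
%  "+repage" operation, which discards the virtual canvas):
%
%      (void) ResetImagePage(image,"0x0+0+0");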
%
*/
MagickExport MagickBooleanType ResetImagePage(Image *image,const char *page)
{
  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  flags=ParseAbsoluteGeometry(page,&geometry);
  if ((flags & WidthValue) != 0)
    {
      if ((flags & HeightValue) == 0)
        geometry.height=geometry.width;
      image->page.width=geometry.width;
      image->page.height=geometry.height;
    }
  if ((flags & AspectValue) != 0)
    {
      if ((flags & XValue) != 0)
        image->page.x+=geometry.x;
      if ((flags & YValue) != 0)
        image->page.y+=geometry.y;
    }
  else
    {
      if ((flags & XValue) != 0)
        {
          image->page.x=geometry.x;
          if ((image->page.width == 0) && (geometry.x > 0))
            image->page.width=image->columns+geometry.x;
        }
      if ((flags & YValue) != 0)
        {
          image->page.y=geometry.y;
          if ((image->page.height == 0) && (geometry.y > 0))
            image->page.height=image->rows+geometry.y;
        }
    }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s e t I m a g e P i x e l s                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetImagePixels() resets the image pixels, that is, all the pixel
%  components are zeroed.
%
%  The format of the ResetImagePixels method is:
%
%      MagickBooleanType ResetImagePixels(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ResetImagePixels(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const void
    *pixels;

  MagickBooleanType
    status;

  MagickSizeType
    length;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  pixels=AcquirePixelCachePixels(image,&length,exception);
  if (pixels != (void *) NULL)
    {
      /*
        Reset in-core image pixels.
      */
      (void) memset((void *) pixels,0,(size_t) length);
      return(MagickTrue);
    }
  /*
    Reset image pixels.
  */
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      (void) memset(q,0,sizeof(PixelPacket));
      if ((image->storage_class == PseudoClass) ||
          (image->colorspace == CMYKColorspace))
        indexes[x]=0;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e B a c k g r o u n d C o l o r                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageBackgroundColor() initializes the image pixels to the image
%  background color.  The background color is defined by the background_color
%  member of the image structure.
%
%  The format of the SetImageBackgroundColor method is:
%
%      MagickBooleanType SetImageBackgroundColor(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType SetImageBackgroundColor(Image *image)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  IndexPacket
    index;

  MagickBooleanType
    status;

  MagickPixelPacket
    background;

  PixelPacket
    pixel;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  if ((IsPixelGray(&image->background_color) == MagickFalse) &&
      (IsGrayColorspace(image->colorspace) != MagickFalse))
    (void) TransformImageColorspace(image,RGBColorspace);
  if ((image->background_color.opacity != OpaqueOpacity) &&
      (image->matte == MagickFalse))
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  GetMagickPixelPacket(image,&background);
  SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *)
    NULL,&background);
  if (image->colorspace == CMYKColorspace)
    ConvertRGBToCMYK(&background);
  index=0;
  pixel.opacity=OpaqueOpacity;
  SetPixelPacket(image,&background,&pixel,&index);
  /*
    Set image background color.
  */
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
      *q++=pixel;
    if (image->colorspace == CMYKColorspace)
      {
        register IndexPacket
          *magick_restrict indexes;

        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
          SetPixelIndex(indexes+x,index);
      }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e C h a n n e l s                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageChannels() sets the number of pixel channels associated with the
%  image.
%
%  The format of the SetImageChannels method is:
%
%      MagickBooleanType SetImageChannels(Image *image,const size_t channels)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channels: The number of pixel channels.
%
*/
MagickExport MagickBooleanType SetImageChannels(Image *image,
  const size_t channels)
{
  image->channels=channels;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e C o l o r                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageColor() sets the entire image canvas to the specified color.
%
%  The format of the SetImageColor method is:
%
%      MagickBooleanType SetImageColor(Image *image,
%        const MagickPixelPacket *color)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o color: the image color.
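%
%  A minimal usage sketch (illustrative only; the color is resolved with
%  QueryMagickColor()):
%
%      MagickPixelPacket
%        color;
%
%      GetMagickPixelPacket(image,&color);
%      (void) QueryMagickColor("red",&color,&image->exception);
%      (void) SetImageColor(image,&color);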
% */ MagickExport MagickBooleanType SetImageColor(Image *image, const MagickPixelPacket *color) { CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); assert(color != (const MagickPixelPacket *) NULL); image->colorspace=color->colorspace; image->matte=color->matte; image->fuzz=color->fuzz; image->depth=color->depth; status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { SetPixelPacket(image,color,q,indexes+x); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e S t o r a g e C l a s s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageStorageClass() sets the image class: DirectClass for true color % images or PseudoClass for colormapped images. % % The format of the SetImageStorageClass method is: % % MagickBooleanType SetImageStorageClass(Image *image, % const ClassType storage_class) % % A description of each parameter follows: % % o image: the image. % % o storage_class: The image class. % */ MagickExport MagickBooleanType SetImageStorageClass(Image *image, const ClassType storage_class) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); image->storage_class=storage_class; return(SyncImagePixelCache(image,&image->exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C l i p M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageClipMask() associates a clip path with the image. The clip path % must be the same dimensions as the image. Set any pixel component of % the clip path to TransparentOpacity to prevent that corresponding image % pixel component from being updated when SyncAuthenticPixels() is applied. % % The format of the SetImageClipMask method is: % % MagickBooleanType SetImageClipMask(Image *image,const Image *clip_mask) % % A description of each parameter follows: % % o image: the image. % % o clip_mask: the image clip path. 
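%
%  A minimal usage sketch (illustrative only; "mask_image" is assumed to be a
%  caller-supplied image with the same dimensions as "image"):
%
%      if (SetImageClipMask(image,mask_image) == MagickFalse)
%        CatchException(&image->exception);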
%
*/
MagickExport MagickBooleanType SetImageClipMask(Image *image,
  const Image *clip_mask)
{
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (clip_mask != (const Image *) NULL)
    if ((clip_mask->columns != image->columns) ||
        (clip_mask->rows != image->rows))
      ThrowBinaryImageException(ImageError,"ImageSizeDiffers",image->filename);
  if (image->clip_mask != (Image *) NULL)
    image->clip_mask=DestroyImage(image->clip_mask);
  image->clip_mask=NewImageList();
  if (clip_mask == (Image *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  image->clip_mask=CloneImage(clip_mask,0,0,MagickTrue,&image->exception);
  if (image->clip_mask == (Image *) NULL)
    return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e E x t e n t                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageExtent() sets the image size (i.e. columns & rows).
%
%  The format of the SetImageExtent method is:
%
%      MagickBooleanType SetImageExtent(Image *image,const size_t columns,
%        const size_t rows)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: The image width in pixels.
%
%    o rows: The image height in pixels.
%
*/
MagickExport MagickBooleanType SetImageExtent(Image *image,const size_t columns,
  const size_t rows)
{
  if ((columns == 0) || (rows == 0))
    ThrowBinaryImageException(ImageError,"NegativeOrZeroImageSize",
      image->filename);
  image->columns=columns;
  image->rows=rows;
  if ((image->depth == 0) || (image->depth > (8*sizeof(MagickSizeType))))
    ThrowBinaryImageException(ImageError,"ImageDepthNotSupported",
      image->filename);
  return(SyncImagePixelCache(image,&image->exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S e t I m a g e I n f o                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageInfo() initializes the `magick' field of the ImageInfo structure.
%  It is set to a type of image format based on the prefix or suffix of the
%  filename.  For example, `ps:image' returns PS indicating a Postscript
%  image.  JPEG is returned for this filename: `image.jpg'.  The filename
%  prefix has precedence over the suffix.  Use an optional index enclosed in
%  brackets after a file name to specify a desired scene of a multi-resolution
%  image format like Photo CD (e.g. img0001.pcd[4]).  A True (non-zero) return
%  value indicates success.
%
%  The format of the SetImageInfo method is:
%
%      MagickBooleanType SetImageInfo(ImageInfo *image_info,
%        const unsigned int frames,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o frames: the number of images you intend to write.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info,
  const unsigned int frames,ExceptionInfo *exception)
{
  char
    extension[MaxTextExtent],
    filename[MaxTextExtent],
    magic[MaxTextExtent],
    *q,
    subimage[MaxTextExtent];

  const MagicInfo
    *magic_info;

  const MagickInfo
    *magick_info;

  ExceptionInfo
    *sans_exception;

  Image
    *image;

  MagickBooleanType
    status;

  register const char
    *p;

  ssize_t
    count;

  unsigned char
    magick[2*MaxTextExtent];

  /*
    Look for 'image.format' in filename.
*/ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); *subimage='\0'; GetPathComponent(image_info->filename,SubimagePath,subimage); if (*subimage != '\0') { /* Look for scene specification (e.g. img0001.pcd[4]). */ if (IsSceneGeometry(subimage,MagickFalse) == MagickFalse) { if (IsGeometry(subimage) != MagickFalse) (void) CloneString(&image_info->extract,subimage); } else { size_t first, last; (void) CloneString(&image_info->scenes,subimage); image_info->scene=StringToUnsignedLong(image_info->scenes); image_info->number_scenes=image_info->scene; p=image_info->scenes; for (q=(char *) image_info->scenes; *q != '\0'; p++) { while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) p++; first=(size_t) strtol(p,&q,10); last=first; while (isspace((int) ((unsigned char) *q)) != 0) q++; if (*q == '-') last=(size_t) strtol(q+1,&q,10); if (first > last) Swap(first,last); if (first < image_info->scene) image_info->scene=first; if (last > image_info->number_scenes) image_info->number_scenes=last; p=q; } image_info->number_scenes-=image_info->scene-1; image_info->subimage=image_info->scene; image_info->subrange=image_info->number_scenes; } } *extension='\0'; if (*image_info->magick == '\0') GetPathComponent(image_info->filename,ExtensionPath,extension); #if defined(MAGICKCORE_ZLIB_DELEGATE) if (*extension != '\0') if ((LocaleCompare(extension,"gz") == 0) || (LocaleCompare(extension,"Z") == 0) || (LocaleCompare(extension,"svgz") == 0) || (LocaleCompare(extension,"wmz") == 0)) { char path[MaxTextExtent]; (void) CopyMagickString(path,image_info->filename,MaxTextExtent); path[strlen(path)-strlen(extension)-1]='\0'; GetPathComponent(path,ExtensionPath,extension); } #endif #if defined(MAGICKCORE_BZLIB_DELEGATE) if (*extension != '\0') if (LocaleCompare(extension,"bz2") == 0) { char path[MaxTextExtent]; (void) CopyMagickString(path,image_info->filename,MaxTextExtent); path[strlen(path)-strlen(extension)-1]='\0'; GetPathComponent(path,ExtensionPath,extension); } #endif image_info->affirm=MagickFalse; sans_exception=AcquireExceptionInfo(); if ((*extension != '\0') && (IsGlob(extension) == MagickFalse)) { MagickFormatType format_type; register ssize_t i; static const char *format_type_formats[] = { "AUTOTRACE", "BROWSE", "DCRAW", "EDIT", "LAUNCH", "MPEG:DECODE", "MPEG:ENCODE", "PRINT", "PS:ALPHA", "PS:CMYK", "PS:COLOR", "PS:GRAY", "PS:MONO", "SCAN", "SHOW", "WIN", (char *) NULL }; /* User specified image format. */ (void) CopyMagickString(magic,extension,MaxTextExtent); LocaleUpper(magic); /* Look for explicit image formats. 
*/ format_type=UndefinedFormatType; i=0; while ((format_type == UndefinedFormatType) && (format_type_formats[i] != (char *) NULL)) { if ((*magic == *format_type_formats[i]) && (LocaleCompare(magic,format_type_formats[i]) == 0)) format_type=ExplicitFormatType; i++; } magick_info=GetMagickInfo(magic,sans_exception); if ((magick_info != (const MagickInfo *) NULL) && (magick_info->format_type != UndefinedFormatType)) format_type=magick_info->format_type; if (format_type == UndefinedFormatType) (void) CopyMagickString(image_info->magick,magic,MaxTextExtent); else if (format_type == ExplicitFormatType) { image_info->affirm=MagickTrue; (void) CopyMagickString(image_info->magick,magic,MaxTextExtent); } if (LocaleCompare(magic,"RGB") == 0) image_info->affirm=MagickFalse; /* maybe SGI disguised as RGB */ } /* Look for explicit 'format:image' in filename. */ *magic='\0'; GetPathComponent(image_info->filename,MagickPath,magic); if (*magic == '\0') { (void) CopyMagickString(magic,image_info->magick,MaxTextExtent); magick_info=GetMagickInfo(magic,sans_exception); GetPathComponent(image_info->filename,CanonicalPath,filename); (void) CopyMagickString(image_info->filename,filename,MaxTextExtent); } else { const DelegateInfo *delegate_info; /* User specified image format. */ LocaleUpper(magic); magick_info=GetMagickInfo(magic,sans_exception); delegate_info=GetDelegateInfo(magic,"*",sans_exception); if (delegate_info == (const DelegateInfo *) NULL) delegate_info=GetDelegateInfo("*",magic,sans_exception); if (((magick_info != (const MagickInfo *) NULL) || (delegate_info != (const DelegateInfo *) NULL)) && (IsMagickConflict(magic) == MagickFalse)) { image_info->affirm=MagickTrue; (void) CopyMagickString(image_info->magick,magic,MaxTextExtent); GetPathComponent(image_info->filename,CanonicalPath,filename); (void) CopyMagickString(image_info->filename,filename,MaxTextExtent); } } sans_exception=DestroyExceptionInfo(sans_exception); if ((magick_info == (const MagickInfo *) NULL) || (GetMagickEndianSupport(magick_info) == MagickFalse)) image_info->endian=UndefinedEndian; if ((image_info->adjoin != MagickFalse) && (frames > 1)) { /* Test for multiple image support (e.g. image%02d.png). */ (void) InterpretImageFilename(image_info,(Image *) NULL, image_info->filename,(int) image_info->scene,filename); if ((LocaleCompare(filename,image_info->filename) != 0) && (strchr(filename,'%') == (char *) NULL)) image_info->adjoin=MagickFalse; } if ((image_info->adjoin != MagickFalse) && (frames > 0)) { /* Some image formats do not support multiple frames per file. */ magick_info=GetMagickInfo(magic,exception); if (magick_info != (const MagickInfo *) NULL) if (GetMagickAdjoin(magick_info) == MagickFalse) image_info->adjoin=MagickFalse; } if (image_info->affirm != MagickFalse) return(MagickTrue); if (frames == 0) { /* Determine the image format from the first few bytes of the file. */ image=AcquireImage(image_info); (void) CopyMagickString(image->filename,image_info->filename, MaxTextExtent); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImage(image); return(MagickFalse); } if ((IsBlobSeekable(image) == MagickFalse) || (IsBlobExempt(image) != MagickFalse)) { /* Copy image to a seekable temporary file. 
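        The format detection below reads the leading bytes of the blob and
        then seeks back, so a non-seekable or exempt blob is first spooled to
        a temporary file with ImageToFile().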
*/ *filename='\0'; status=ImageToFile(image,filename,exception); (void) CloseBlob(image); if (status == MagickFalse) { image=DestroyImage(image); return(MagickFalse); } SetImageInfoFile(image_info,(FILE *) NULL); (void) CopyMagickString(image->filename,filename,MaxTextExtent); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImage(image); return(MagickFalse); } (void) CopyMagickString(image_info->filename,filename,MaxTextExtent); image_info->temporary=MagickTrue; } (void) memset(magick,0,sizeof(magick)); count=ReadBlob(image,2*MaxTextExtent,magick); (void) SeekBlob(image,-((MagickOffsetType) count),SEEK_CUR); (void) CloseBlob(image); image=DestroyImage(image); /* Check magic.xml configuration file. */ sans_exception=AcquireExceptionInfo(); magic_info=GetMagicInfo(magick,(size_t) count,sans_exception); if ((magic_info != (const MagicInfo *) NULL) && (GetMagicName(magic_info) != (char *) NULL)) { (void) CopyMagickString(image_info->magick,GetMagicName(magic_info), MaxTextExtent); magick_info=GetMagickInfo(image_info->magick,sans_exception); if ((magick_info == (const MagickInfo *) NULL) || (GetMagickEndianSupport(magick_info) == MagickFalse)) image_info->endian=UndefinedEndian; sans_exception=DestroyExceptionInfo(sans_exception); return(MagickTrue); } magick_info=GetMagickInfo(image_info->magick,sans_exception); if ((magick_info == (const MagickInfo *) NULL) || (GetMagickEndianSupport(magick_info) == MagickFalse)) image_info->endian=UndefinedEndian; sans_exception=DestroyExceptionInfo(sans_exception); } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e I n f o B l o b % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageInfoBlob() sets the image info blob member. % % The format of the SetImageInfoBlob method is: % % void SetImageInfoBlob(ImageInfo *image_info,const void *blob, % const size_t length) % % A description of each parameter follows: % % o image_info: the image info. % % o blob: the blob. % % o length: the blob length. % */ MagickExport void SetImageInfoBlob(ImageInfo *image_info,const void *blob, const size_t length) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); image_info->blob=(void *) blob; image_info->length=length; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e I n f o F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageInfoFile() sets the image info file member. % % The format of the SetImageInfoFile method is: % % void SetImageInfoFile(ImageInfo *image_info,FILE *file) % % A description of each parameter follows: % % o image_info: the image info. % % o file: the file. 
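%
%  A minimal usage sketch (illustrative only; directs subsequent writes to an
%  already-open stdio stream):
%
%      SetImageInfoFile(image_info,stdout);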
% */ MagickExport void SetImageInfoFile(ImageInfo *image_info,FILE *file) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); image_info->file=file; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageMask() associates a mask with the image. The mask must be the same % dimensions as the image. % % The format of the SetImageMask method is: % % MagickBooleanType SetImageMask(Image *image,const Image *mask) % % A description of each parameter follows: % % o image: the image. % % o mask: the image mask. % */ MagickExport MagickBooleanType SetImageMask(Image *image,const Image *mask) { assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (mask != (const Image *) NULL) if ((mask->columns != image->columns) || (mask->rows != image->rows)) ThrowBinaryImageException(ImageError,"ImageSizeDiffers",image->filename); if (image->mask != (Image *) NULL) image->mask=DestroyImage(image->mask); image->mask=NewImageList(); if (mask == (Image *) NULL) return(MagickTrue); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); image->mask=CloneImage(mask,0,0,MagickTrue,&image->exception); if (image->mask == (Image *) NULL) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e O p a c i t y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageOpacity() sets the opacity levels of the image. % % The format of the SetImageOpacity method is: % % MagickBooleanType SetImageOpacity(Image *image,const Quantum opacity) % % A description of each parameter follows: % % o image: the image. % % o opacity: the level of transparency: 0 is fully opaque and QuantumRange is % fully transparent. 
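%
%  A minimal usage sketch (illustrative only; makes the image uniformly
%  half-transparent):
%
%      (void) SetImageOpacity(image,(Quantum) (QuantumRange/2));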
%
*/
MagickExport MagickBooleanType SetImageOpacity(Image *image,
  const Quantum opacity)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  image->matte=MagickTrue;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelOpacity(q,opacity);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e V i r t u a l P i x e l M e t h o d                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageVirtualPixelMethod() sets the "virtual pixels" method for the
%  image and returns the previous setting.  A virtual pixel is any pixel
%  access that is outside the boundaries of the image cache.
%
%  The format of the SetImageVirtualPixelMethod() method is:
%
%      VirtualPixelMethod SetImageVirtualPixelMethod(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: choose the type of virtual pixel.
%
*/
MagickExport VirtualPixelMethod SetImageVirtualPixelMethod(const Image *image,
  const VirtualPixelMethod virtual_pixel_method)
{
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(SetPixelCacheVirtualMethod(image,virtual_pixel_method));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S m u s h I m a g e s                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SmushImages() takes all images from the current image pointer to the end
%  of the image list and smushes them to each other top-to-bottom if the
%  stack parameter is true, otherwise left-to-right.
%
%  The current gravity setting now affects how the image is justified in the
%  final image.
%
%  The format of the SmushImages method is:
%
%      Image *SmushImages(const Image *images,const MagickBooleanType stack,
%        const ssize_t offset,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o stack: A value other than 0 stacks the images top-to-bottom.
%
%    o offset: minimum distance in pixels between images.
%
%    o exception: return any errors or warnings in this structure.
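%
%  A minimal usage sketch (illustrative only; stacks the list top-to-bottom
%  with a minimum gap of 5 pixels):
%
%      smushed=SmushImages(images,MagickTrue,5,exception);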
% */ static ssize_t SmushXGap(const Image *smush_image,const Image *images, const ssize_t offset,ExceptionInfo *exception) { CacheView *left_view, *right_view; const Image *left_image, *right_image; RectangleInfo left_geometry, right_geometry; register const PixelPacket *p; register ssize_t i, y; size_t gap; ssize_t x; if (images->previous == (Image *) NULL) return(0); right_image=images; SetGeometry(smush_image,&right_geometry); GravityAdjustGeometry(right_image->columns,right_image->rows, right_image->gravity,&right_geometry); left_image=images->previous; SetGeometry(smush_image,&left_geometry); GravityAdjustGeometry(left_image->columns,left_image->rows, left_image->gravity,&left_geometry); gap=right_image->columns; left_view=AcquireVirtualCacheView(left_image,exception); right_view=AcquireVirtualCacheView(right_image,exception); for (y=0; y < (ssize_t) smush_image->rows; y++) { for (x=(ssize_t) left_image->columns-1; x > 0; x--) { p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception); if ((p == (const PixelPacket *) NULL) || (GetPixelOpacity(p) != TransparentOpacity) || ((left_image->columns-x-1) >= gap)) break; } i=(ssize_t) left_image->columns-x-1; for (x=0; x < (ssize_t) right_image->columns; x++) { p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1, exception); if ((p == (const PixelPacket *) NULL) || (GetPixelOpacity(p) != TransparentOpacity) || ((x+i) >= (ssize_t) gap)) break; } if ((x+i) < (ssize_t) gap) gap=(size_t) (x+i); } right_view=DestroyCacheView(right_view); left_view=DestroyCacheView(left_view); if (y < (ssize_t) smush_image->rows) return(offset); return((ssize_t) gap-offset); } static ssize_t SmushYGap(const Image *smush_image,const Image *images, const ssize_t offset,ExceptionInfo *exception) { CacheView *bottom_view, *top_view; const Image *bottom_image, *top_image; RectangleInfo bottom_geometry, top_geometry; register const PixelPacket *p; register ssize_t i, x; size_t gap; ssize_t y; if (images->previous == (Image *) NULL) return(0); bottom_image=images; SetGeometry(smush_image,&bottom_geometry); GravityAdjustGeometry(bottom_image->columns,bottom_image->rows, bottom_image->gravity,&bottom_geometry); top_image=images->previous; SetGeometry(smush_image,&top_geometry); GravityAdjustGeometry(top_image->columns,top_image->rows,top_image->gravity, &top_geometry); gap=bottom_image->rows; top_view=AcquireVirtualCacheView(top_image,exception); bottom_view=AcquireVirtualCacheView(bottom_image,exception); for (x=0; x < (ssize_t) smush_image->columns; x++) { for (y=(ssize_t) top_image->rows-1; y > 0; y--) { p=GetCacheViewVirtualPixels(top_view,top_geometry.x+x,y,1,1,exception); if ((p == (const PixelPacket *) NULL) || (GetPixelOpacity(p) != TransparentOpacity) || ((top_image->rows-y-1) >= gap)) break; } i=(ssize_t) top_image->rows-y-1; for (y=0; y < (ssize_t) bottom_image->rows; y++) { p=GetCacheViewVirtualPixels(bottom_view,bottom_geometry.x+x,y,1,1, exception); if ((p == (const PixelPacket *) NULL) || (GetPixelOpacity(p) != TransparentOpacity) || ((y+i) >= (ssize_t) gap)) break; } if ((y+i) < (ssize_t) gap) gap=(size_t) (y+i); } bottom_view=DestroyCacheView(bottom_view); top_view=DestroyCacheView(top_view); if (x < (ssize_t) smush_image->columns) return(offset); return((ssize_t) gap-offset); } MagickExport Image *SmushImages(const Image *images, const MagickBooleanType stack,const ssize_t offset,ExceptionInfo *exception) { #define SmushImageTag "Smush/Image" CacheView *smush_view; const Image *image; Image *smush_image; MagickBooleanType matte, 
proceed,
    status;

  MagickOffsetType
    n;

  RectangleInfo
    geometry;

  register const Image
    *next;

  size_t
    height,
    number_images,
    width;

  ssize_t
    x_offset,
    y_offset;

  /*
    Compute maximum area of smushed image.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=images;
  matte=image->matte;
  number_images=1;
  width=image->columns;
  height=image->rows;
  next=GetNextImageInList(image);
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if (next->matte != MagickFalse)
      matte=MagickTrue;
    number_images++;
    if (stack != MagickFalse)
      {
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        if (next->previous != (Image *) NULL)
          height+=offset;
        continue;
      }
    width+=next->columns;
    if (next->previous != (Image *) NULL)
      width+=offset;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Smush images.
  */
  smush_image=CloneImage(image,width,height,MagickTrue,exception);
  if (smush_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(smush_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&smush_image->exception);
      smush_image=DestroyImage(smush_image);
      return((Image *) NULL);
    }
  smush_image->matte=matte;
  (void) SetImageBackgroundColor(smush_image);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  smush_view=AcquireVirtualCacheView(smush_image,exception);
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    SetGeometry(smush_image,&geometry);
    GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry);
    if (stack != MagickFalse)
      {
        x_offset-=geometry.x;
        y_offset-=SmushYGap(smush_image,image,offset,exception);
      }
    else
      {
        x_offset-=SmushXGap(smush_image,image,offset,exception);
        y_offset-=geometry.y;
      }
    status=CompositeImage(smush_image,OverCompositeOp,image,x_offset,y_offset);
    proceed=SetImageProgress(image,SmushImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) image->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) image->rows;
      }
    image=GetNextImageInList(image);
  }
  if (stack == MagickFalse)
    smush_image->columns=(size_t) x_offset;
  else
    smush_image->rows=(size_t) y_offset;
  smush_view=DestroyCacheView(smush_view);
  if (status == MagickFalse)
    smush_image=DestroyImage(smush_image);
  return(smush_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S t r i p I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  StripImage() strips an image of all profiles and comments.
%
%  The format of the StripImage method is:
%
%      MagickBooleanType StripImage(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
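%
%  A minimal usage sketch (illustrative only; drops profiles, comments, and
%  date properties):
%
%      (void) StripImage(image);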
%
*/
MagickExport MagickBooleanType StripImage(Image *image)
{
  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  DestroyImageProfiles(image);
  (void) DeleteImageProperty(image,"comment");
  (void) DeleteImageProperty(image,"date:create");
  (void) DeleteImageProperty(image,"date:modify");
  status=SetImageArtifact(image,"png:exclude-chunk",
    "bKGD,caNv,cHRM,eXIf,gAMA,iCCP,iTXt,pHYs,sRGB,tEXt,zCCP,zTXt,date");
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImage() initializes the red, green, and blue intensities of each pixel
%  as defined by the colormap index.
%
%  The format of the SyncImage method is:
%
%      MagickBooleanType SyncImage(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/

static inline IndexPacket PushColormapIndex(Image *image,
  const size_t index,MagickBooleanType *range_exception)
{
  if (index < image->colors)
    return((IndexPacket) index);
  *range_exception=MagickTrue;
  return((IndexPacket) 0);
}

MagickExport MagickBooleanType SyncImage(Image *image)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    range_exception,
    status,
    taint;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (image->ping != MagickFalse)
    return(MagickTrue);
  if (image->storage_class != PseudoClass)
    return(MagickFalse);
  assert(image->colormap != (PixelPacket *) NULL);
  range_exception=MagickFalse;
  status=MagickTrue;
  taint=image->taint;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(range_exception,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    IndexPacket
      index;

    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=PushColormapIndex(image,(size_t) GetPixelIndex(indexes+x),
        &range_exception);
      if (image->matte == MagickFalse)
        SetPixelRgb(q,image->colormap+(ssize_t) index);
      else
        SetPixelRGBO(q,image->colormap+(ssize_t) index);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  image->taint=taint;
  if ((image->ping == MagickFalse) && (range_exception != MagickFalse))
    (void) ThrowMagickException(&image->exception,GetMagickModule(),
      CorruptImageWarning,"InvalidColormapIndex","`%s'",image->filename);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S y n c I m a g e S e t t i n g s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImageSettings() syncs image_info options into per-image attributes.
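%
%  For example, an option registered with SetImageOption(image_info,
%  "gravity","Center") is parsed into image->gravity below, and any option
%  without a dedicated attribute is mirrored into the image artifact list at
%  the end of the method.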
% % The format of the SyncImageSettings method is: % % MagickBooleanType SyncImageSettings(const ImageInfo *image_info, % Image *image) % MagickBooleanType SyncImagesSettings(const ImageInfo *image_info, % Image *image) % % A description of each parameter follows: % % o image_info: the image info. % % o image: the image. % */ MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info, Image *images) { Image *image; assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); image=images; for ( ; image != (Image *) NULL; image=GetNextImageInList(image)) (void) SyncImageSettings(image_info,image); (void) DeleteImageOption(image_info,"page"); return(MagickTrue); } MagickExport MagickBooleanType SyncImageSettings(const ImageInfo *image_info, Image *image) { char property[MaxTextExtent]; const char *option, *value; GeometryInfo geometry_info; MagickStatusType flags; ResolutionType units; /* Sync image options. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); option=GetImageOption(image_info,"background"); if (option != (const char *) NULL) (void) QueryColorDatabase(option,&image->background_color, &image->exception); option=GetImageOption(image_info,"bias"); if (option != (const char *) NULL) image->bias=StringToDoubleInterval(option,(double) QuantumRange+1.0); option=GetImageOption(image_info,"black-point-compensation"); if (option != (const char *) NULL) image->black_point_compensation=(MagickBooleanType) ParseCommandOption( MagickBooleanOptions,MagickFalse,option); option=GetImageOption(image_info,"blue-primary"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); image->chromaticity.blue_primary.x=geometry_info.rho; image->chromaticity.blue_primary.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.blue_primary.y=image->chromaticity.blue_primary.x; } option=GetImageOption(image_info,"bordercolor"); if (option != (const char *) NULL) (void) QueryColorDatabase(option,&image->border_color,&image->exception); option=GetImageOption(image_info,"colors"); if (option != (const char *) NULL) image->colors=StringToUnsignedLong(option); option=GetImageOption(image_info,"compose"); if (option != (const char *) NULL) image->compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions, MagickFalse,option); option=GetImageOption(image_info,"compress"); if (option != (const char *) NULL) image->compression=(CompressionType) ParseCommandOption( MagickCompressOptions,MagickFalse,option); option=GetImageOption(image_info,"debug"); if (option != (const char *) NULL) image->debug=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions, MagickFalse,option); option=GetImageOption(image_info,"density"); if (option != (const char *) NULL) { GeometryInfo geometry_info; /* Set image density. 
*/ flags=ParseGeometry(option,&geometry_info); image->x_resolution=geometry_info.rho; image->y_resolution=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->y_resolution=image->x_resolution; } option=GetImageOption(image_info,"depth"); if (option != (const char *) NULL) image->depth=StringToUnsignedLong(option); option=GetImageOption(image_info,"endian"); if (option != (const char *) NULL) image->endian=(EndianType) ParseCommandOption(MagickEndianOptions, MagickFalse,option); option=GetImageOption(image_info,"filter"); if (option != (const char *) NULL) image->filter=(FilterTypes) ParseCommandOption(MagickFilterOptions, MagickFalse,option); option=GetImageOption(image_info,"fuzz"); if (option != (const char *) NULL) image->fuzz=StringToDoubleInterval(option,(double) QuantumRange+1.0); option=GetImageOption(image_info,"gravity"); if (option != (const char *) NULL) image->gravity=(GravityType) ParseCommandOption(MagickGravityOptions, MagickFalse,option); option=GetImageOption(image_info,"green-primary"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); image->chromaticity.green_primary.x=geometry_info.rho; image->chromaticity.green_primary.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.green_primary.y=image->chromaticity.green_primary.x; } option=GetImageOption(image_info,"intensity"); if (option != (const char *) NULL) image->intensity=(PixelIntensityMethod) ParseCommandOption( MagickPixelIntensityOptions,MagickFalse,option); option=GetImageOption(image_info,"intent"); if (option != (const char *) NULL) image->rendering_intent=(RenderingIntent) ParseCommandOption( MagickIntentOptions,MagickFalse,option); option=GetImageOption(image_info,"interlace"); if (option != (const char *) NULL) image->interlace=(InterlaceType) ParseCommandOption(MagickInterlaceOptions, MagickFalse,option); option=GetImageOption(image_info,"interpolate"); if (option != (const char *) NULL) image->interpolate=(InterpolatePixelMethod) ParseCommandOption( MagickInterpolateOptions,MagickFalse,option); option=GetImageOption(image_info,"loop"); if (option != (const char *) NULL) image->iterations=StringToUnsignedLong(option); option=GetImageOption(image_info,"mattecolor"); if (option != (const char *) NULL) (void) QueryColorDatabase(option,&image->matte_color,&image->exception); option=GetImageOption(image_info,"orient"); if (option != (const char *) NULL) image->orientation=(OrientationType) ParseCommandOption( MagickOrientationOptions,MagickFalse,option); option=GetImageOption(image_info,"page"); if (option != (const char *) NULL) { char *geometry; geometry=GetPageGeometry(option); flags=ParseAbsoluteGeometry(geometry,&image->page); geometry=DestroyString(geometry); } option=GetImageOption(image_info,"quality"); if (option != (const char *) NULL) image->quality=StringToUnsignedLong(option); option=GetImageOption(image_info,"red-primary"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); image->chromaticity.red_primary.x=geometry_info.rho; image->chromaticity.red_primary.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.red_primary.y=image->chromaticity.red_primary.x; } if (image_info->quality != UndefinedCompressionQuality) image->quality=image_info->quality; option=GetImageOption(image_info,"scene"); if (option != (const char *) NULL) image->scene=StringToUnsignedLong(option); option=GetImageOption(image_info,"taint"); if (option != (const char *) NULL) image->taint=(MagickBooleanType) 
ParseCommandOption(MagickBooleanOptions, MagickFalse,option); option=GetImageOption(image_info,"tile-offset"); if (option != (const char *) NULL) { char *geometry; geometry=GetPageGeometry(option); flags=ParseAbsoluteGeometry(geometry,&image->tile_offset); geometry=DestroyString(geometry); } option=GetImageOption(image_info,"transparent-color"); if (option != (const char *) NULL) (void) QueryColorDatabase(option,&image->transparent_color, &image->exception); option=GetImageOption(image_info,"type"); if (option != (const char *) NULL) image->type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse, option); option=GetImageOption(image_info,"units"); if (option != (const char *) NULL) units=(ResolutionType) ParseCommandOption(MagickResolutionOptions, MagickFalse,option); else units = image_info->units; if (units != UndefinedResolution) { if (image->units != units) switch (image->units) { case PixelsPerInchResolution: { if (units == PixelsPerCentimeterResolution) { image->x_resolution/=2.54; image->y_resolution/=2.54; } break; } case PixelsPerCentimeterResolution: { if (units == PixelsPerInchResolution) { image->x_resolution=(double) ((size_t) (100.0*2.54* image->x_resolution+0.5))/100.0; image->y_resolution=(double) ((size_t) (100.0*2.54* image->y_resolution+0.5))/100.0; } break; } default: break; } image->units=units; } option=GetImageOption(image_info,"white-point"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); image->chromaticity.white_point.x=geometry_info.rho; image->chromaticity.white_point.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.white_point.y=image->chromaticity.white_point.x; } ResetImageOptionIterator(image_info); for (option=GetNextImageOption(image_info); option != (const char *) NULL; ) { value=GetImageOption(image_info,option); if (value != (const char *) NULL) { (void) FormatLocaleString(property,MaxTextExtent,"%s",option); (void) SetImageArtifact(image,property,value); } option=GetNextImageOption(image_info); } return(MagickTrue); }
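
/*
  A worked example of the resolution conversion in SyncImageSettings() above:
  an image at 300 pixels-per-inch converts to 300/2.54 ~= 118.11
  pixels-per-centimeter, while the reverse direction rounds to two decimal
  places, e.g. (double) ((size_t) (100.0*2.54*118.11+0.5))/100.0 == 300.00.
*/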
GB_binop__bget_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bget_uint8) // A.*B function (eWiseMult): GB (_AemultB_01__bget_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__bget_uint8) // A.*B function (eWiseMult): GB (_AemultB_03__bget_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bget_uint8) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bget_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__bget_uint8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bget_uint8) // C=scalar+B GB (_bind1st__bget_uint8) // C=scalar+B' GB (_bind1st_tran__bget_uint8) // C=A+scalar GB (_bind2nd__bget_uint8) // C=A'+scalar GB (_bind2nd_tran__bget_uint8) // C type: uint8_t // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = GB_BITGET (aij, bij, uint8_t, 8) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint8_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint8_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_BITGET (x, y, uint8_t, 8) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BGET || GxB_NO_UINT8 || GxB_NO_BGET_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__bget_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bget_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bget_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bget_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__bget_uint8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bget_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__bget_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bget_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bget_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = GBX (Bx, p, false) ; Cx [p] = GB_BITGET (x, bij, uint8_t, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bget_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = GBX (Ax, p, false) ; Cx [p] = GB_BITGET (aij, y, uint8_t, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITGET (x, aij, uint8_t, 8) ; \ } GrB_Info GB (_bind1st_tran__bget_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, 
const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITGET (aij, y, uint8_t, 8) ; \ } GrB_Info GB (_bind2nd_tran__bget_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
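
/*
  Note on the BGET semantics used above (a sketch; the authoritative
  definition of GB_BITGET is in the GraphBLAS headers): z = bitget (x,k)
  extracts bit k of x as 0 or 1, with 1-based bit positions, so for uint8_t
  inputs bitget (5,1) == 1, bitget (5,2) == 0, bitget (5,3) == 1, and any
  position outside 1..8 yields 0.
*/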
3DConvolution.c
/**
 * 3DConvolution.c: This file was adapted from PolyBench/GPU 1.0 test suite
 * to run on GPU with OpenMP 4.0 pragmas and OpenCL driver.
 *
 * http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
 *
 * Contacts: Marcio M Pereira <mpereira@ic.unicamp.br>
 *           Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br>
 *           Luís Felipe Mattos <ra107822@students.ic.unicamp.br>
 */

#include <omp.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>

#include "../../common/polybenchUtilFuncts.h"

// define the error threshold for the results "not matching"
#define ERROR_THRESHOLD 0.5

#define GPU 1

/* Problem size */
#define NI 512
#define NJ 512
#define NK 512

/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;

void conv3D(DATA_TYPE *A, DATA_TYPE *B) {
  int i, j, k;
  DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;

  c11 = +2;
  c21 = +5;
  c31 = -8;
  c12 = -3;
  c22 = +6;
  c32 = -9;
  c13 = +4;
  c23 = +7;
  c33 = +10;

  for (j = 1; j < NJ - 1; ++j) {
    for (i = 1; i < NI - 1; ++i) {
      for (k = 1; k < NK - 1; ++k) {
        B[i * (NK * NJ) + j * NK + k] =
            c11 * A[(i - 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] +
            c13 * A[(i + 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] +
            c21 * A[(i - 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] +
            c23 * A[(i + 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] +
            c31 * A[(i - 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] +
            c33 * A[(i + 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] +
            c12 * A[(i + 0) * (NK * NJ) + (j - 1) * NK + (k + 0)] +
            c22 * A[(i + 0) * (NK * NJ) + (j + 0) * NK + (k + 0)] +
            c32 * A[(i + 0) * (NK * NJ) + (j + 1) * NK + (k + 0)] +
            c11 * A[(i - 1) * (NK * NJ) + (j - 1) * NK + (k + 1)] +
            c13 * A[(i + 1) * (NK * NJ) + (j - 1) * NK + (k + 1)] +
            c21 * A[(i - 1) * (NK * NJ) + (j + 0) * NK + (k + 1)] +
            c23 * A[(i + 1) * (NK * NJ) + (j + 0) * NK + (k + 1)] +
            c31 * A[(i - 1) * (NK * NJ) + (j + 1) * NK + (k + 1)] +
            c33 * A[(i + 1) * (NK * NJ) + (j + 1) * NK + (k + 1)];
      }
    }
  }
}

void conv3D_OMP(DATA_TYPE *A, DATA_TYPE *B) {
  int i, j, k;
  DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;

  c11 = +2;
  c21 = +5;
  c31 = -8;
  c12 = -3;
  c22 = +6;
  c32 = -9;
  c13 = +4;
  c23 = +7;
  c33 = +10;

// k is declared outside the loop nest, so it must be made private explicitly;
// collapse(2) only privatizes the collapsed iteration variables j and i, and
// a shared k would be a data race between threads.
#pragma omp target device(GPU) map(to : A[:NI * NJ * NK]) map(from : B[:NI * NJ * NK])
#pragma omp parallel for collapse(2) private(k)
  for (j = 1; j < NJ - 1; ++j) {
    for (i = 1; i < NI - 1; ++i) {
      for (k = 1; k < NK - 1; ++k) {
        B[i * (NK * NJ) + j * NK + k] =
            c11 * A[(i - 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] +
            c13 * A[(i + 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] +
            c21 * A[(i - 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] +
            c23 * A[(i + 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] +
            c31 * A[(i - 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] +
            c33 * A[(i + 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] +
            c12 * A[(i + 0) * (NK * NJ) + (j - 1) * NK + (k + 0)] +
            c22 * A[(i + 0) * (NK * NJ) + (j + 0) * NK + (k + 0)] +
            c32 * A[(i + 0) * (NK * NJ) + (j + 1) * NK + (k + 0)] +
            c11 * A[(i - 1) * (NK * NJ) + (j - 1) * NK + (k + 1)] +
            c13 * A[(i + 1) * (NK * NJ) + (j - 1) * NK + (k + 1)] +
            c21 * A[(i - 1) * (NK * NJ) + (j + 0) * NK + (k + 1)] +
            c23 * A[(i + 1) * (NK * NJ) + (j + 0) * NK + (k + 1)] +
            c31 * A[(i - 1) * (NK * NJ) + (j + 1) * NK + (k + 1)] +
            c33 * A[(i + 1) * (NK * NJ) + (j + 1) * NK + (k + 1)];
      }
    }
  }
}

void init(DATA_TYPE *A) {
  int i, j, k;

  for (i = 0; i < NI; ++i) {
    for (j = 0; j < NJ; ++j) {
      for (k = 0; k < NK; ++k) {
        A[i * (NK * NJ) + j * NK + k] = i % 12 + 2 * (j % 7) + 3 * (k % 13);
      }
    }
  }
}

void compareResults(DATA_TYPE *B, DATA_TYPE
*B_GPU) { int i, j, k, fail; fail = 0; // Compare result from cpu and gpu... for (i = 1; i < NI - 1; ++i) { for (j = 1; j < NJ - 1; ++j) { for (k = 1; k < NK - 1; ++k) { if (percentDiff(B[i * (NK * NJ) + j * NK + k], B_GPU[i * (NK * NJ) + j * NK + k]) > ERROR_THRESHOLD) { fail++; } } } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f " "Percent: %d\n", ERROR_THRESHOLD, fail); } int main(int argc, char *argv[]) { double t_start, t_end; DATA_TYPE *A; DATA_TYPE *B; DATA_TYPE *B_GPU; A = (DATA_TYPE *)malloc(NI * NJ * NK * sizeof(DATA_TYPE)); B = (DATA_TYPE *)malloc(NI * NJ * NK * sizeof(DATA_TYPE)); B_GPU = (DATA_TYPE *)malloc(NI * NJ * NK * sizeof(DATA_TYPE)); fprintf(stdout, ">> Three dimensional (3D) convolution <<\n"); init(A); t_start = rtclock(); conv3D_OMP(A, B_GPU); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); t_start = rtclock(); conv3D(A, B); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); compareResults(B, B_GPU); free(A); free(B); free(B_GPU); return 0; }
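
/*
  Build note (one plausible toolchain, not part of the original benchmark):
  the target pragma above requires a compiler with OpenMP device offloading,
  for example

      clang -O2 -fopenmp -fopenmp-targets=nvptx64-nvidia-cuda 3DConvolution.c

  If no device is available the target region falls back to host execution,
  so the "GPU" and "CPU" runtimes printed by main() will be comparable.
*/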
mandel-par-dynamic.c
#include <stdlib.h> #include <stdio.h> #include <string.h> #include <omp.h> #include "complex.h" #include "linspace.h" int rows; int columns; int chunk_size; int nthreads; void setGlobalVariables() { rows = atoi(getenv("INPUTMAT_ROWS")); columns = atoi(getenv("INPUTMAT_COLS")); chunk_size = atoi(getenv("SCHED_CHUNK_SIZE")); nthreads = atoi(getenv("NUM_THREADS")); } void printMatrix(int **m) { for(int i = 0; i < rows; i++){ for(int j = 0; j < columns; j++){ printf("%d ", m[i][j]); } printf("\n"); } } void matrixToCsv(int **m) { FILE *fp; char filename[25]; sprintf(filename, "output-omp-par-%d.csv", rows); fp = fopen(filename, "w"); for (int i = 0; i < rows; i++) { for (int j = 0; j < columns; j++) { if (j != columns-1) { fprintf(fp, "%d;", m[i][j]); } else { fprintf(fp, "%d\n", m[i][j]); } } } fclose(fp); } int mandelbrotorbit(Complex c) { Complex z; z.re = 0.0; z.im = 0.0; z = addComplex(multComplex(z, z), c); for (int i = 1; i <= 100; i++) { if (absComplex(z) > 2) { return i - 1; } z = addComplex(multComplex(z, z), c); } return 100; } int** mandelbrot(Complex** inputmat) { int **outputmat = malloc(rows * sizeof(int*)); for (int i = 0; i < rows; i++){ outputmat[i] = malloc(columns * sizeof(int)); } #pragma omp parallel for schedule(dynamic, chunk_size) num_threads(nthreads) for(int i = 0; i < rows; i++) { for(int j = 0; j < columns; j++) { outputmat[i][j] = mandelbrotorbit(inputmat[i][j]); } } return outputmat; } int main(int argc, char *argv[]) { setGlobalVariables(); Complex start, end; start.re = -2.5; start.im = -1.25; end.re = 1.0; end.im = 1.25; Complex **inputmat = clinspace(start, end, rows, columns); int **outputmat = mandelbrot(inputmat); if(argc > 1){ if(!strcmp(argv[1], "-export")){ matrixToCsv(outputmat); } } return 0; }
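
/*
  Usage sketch (binary name assumed): the matrix size, chunk size and thread
  count are read from the environment, so a typical run looks like

      INPUTMAT_ROWS=2000 INPUTMAT_COLS=3000 SCHED_CHUNK_SIZE=8 \
      NUM_THREADS=8 ./mandel-par-dynamic -export

  which writes the iteration counts to output-omp-par-2000.csv.
*/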
fib.c
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#endif

uint32_t fib(int n) {
  if (n < 2) return n;
  else {
    return fib(n-1) + fib(n-2);
  }
}

int main(int argc, char* argv[]) {
  int i = 0;
  if (argc < 2) {
    fprintf(stderr, "usage: %s <n>\n", argv[0]);
    return 1;
  }
  int k = atoi(argv[1]);
  // Use only as many threads as are available
#ifdef _OPENMP
  omp_set_num_threads(omp_get_num_procs());
#endif
  // Privatizes i, and lets each thread compute "round robin"-style, thus
  // preventing only one thread executing the last and heaviest block.
#pragma omp parallel private(i)
  {
#pragma omp for schedule(dynamic, 1)
    for (i=0; i<=k; i++) {
      printf("Fib(%d): %u\n", i, fib(i));
    }
  }
  return 0;
}
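
/*
  Usage sketch (binary name assumed): `./fib 30` prints Fib(0)..Fib(30).
  schedule(dynamic, 1) hands out one index at a time, so the expensive
  high-n calls are spread across threads instead of landing on whichever
  thread drew the last contiguous chunk.
*/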
GB_unop__minv_uint16_uint16.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__minv_uint16_uint16 // op(A') function: GB_unop_tran__minv_uint16_uint16 // C type: uint16_t // A type: uint16_t // cast: uint16_t cij = aij // unaryop: cij = GB_IMINV_UNSIGNED (aij, 16) #define GB_ATYPE \ uint16_t #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_UNSIGNED (x, 16) ; // casting #define GB_CAST(z, aij) \ uint16_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint16_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint16_t z = aij ; \ Cx [pC] = GB_IMINV_UNSIGNED (z, 16) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_UINT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__minv_uint16_uint16 ( uint16_t *Cx, // Cx and Ax may be aliased const uint16_t *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (uint16_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint16_t aij = Ax [p] ; uint16_t z = aij ; Cx [p] = GB_IMINV_UNSIGNED (z, 16) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint16_t aij = Ax [p] ; uint16_t z = aij ; Cx [p] = GB_IMINV_UNSIGNED (z, 16) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__minv_uint16_uint16 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
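
/*
  Note on MINV over unsigned integers (a sketch; GB_IMINV_UNSIGNED is defined
  in the GraphBLAS headers): the multiplicative inverse 1/x is computed in
  integer arithmetic, so minv(1) == 1 and minv(x) == 0 for x > 1, while
  minv(0) follows GraphBLAS's integer divide-by-zero convention and returns
  the largest value of the type (UINT16_MAX here).
*/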
wshfl.c
/* Copyright 2018. Massachusetts Institute of Technology. * All rights reserved. Use of this source code is governed by * a BSD-style license which can be found in the LICENSE file. * * Authors: * 2018 Siddharth Iyer <ssi@mit.edu> * * Tamir J, Uecker M, Chen W, Lai P, Alley MT, Vasanawala SS, Lustig M. * T2 shuffling: Sharp, multicontrast, volumetric fast spin‐echo imaging. * Magnetic resonance in medicine. 2017 Jan 1;77(1):180-95. * * B Bilgic, BA Gagoski, SF Cauley, AP Fan, JR Polimeni, PE Grant, * LL Wald, and K Setsompop, Wave-CAIPI for highly accelerated 3D * imaging. Magn Reson Med (2014) doi: 10.1002/mrm.25347 * * Iyer S, Bilgic B, Setsompop K. * Faster T2 shuffling with Wave. * Presented in the session: "Signal Encoding and Decoding" at ISMRM 2018. * https://www.ismrm.org/18/program_files/O67.htm */ #include <assert.h> #include <stdbool.h> #include <complex.h> #include <math.h> #ifdef _OPENMP #include <omp.h> #endif #include "num/multind.h" #include "num/flpmath.h" #include "num/fft.h" #include "num/init.h" #include "num/iovec.h" #include "num/ops.h" #ifdef USE_CUDA #include "num/gpuops.h" #endif #include "iter/iter.h" #include "iter/lsqr.h" #include "iter/misc.h" #include "linops/linop.h" #include "linops/fmac.h" #include "linops/someops.h" #include "linops/realval.h" #include "sense/model.h" #include "misc/debug.h" #include "misc/mri.h" #include "misc/utils.h" #include "misc/mmio.h" #include "misc/misc.h" #include "misc/opts.h" #include "wavelet/wavthresh.h" #include "lowrank/lrthresh.h" static const char usage_str[] = "<maps> <wave> <phi> <reorder> <table> <output>"; static const char help_str[] = "Perform a wave-shuffling reconstruction.\n\n" "Conventions:\n" " * (sx, sy, sz) - Spatial dimensions.\n" " * wx - Extended FOV in READ_DIM due to\n" " wave's voxel spreading.\n" " * (nc, md) - Number of channels and ESPIRiT's \n" " extended-SENSE model operator\n" " dimensions (or # of maps).\n" " * (tf, tk) - Turbo-factor and the rank\n" " of the temporal basis used in\n" " shuffling.\n" " * ntr - Number of TRs, or the number of\n" " (ky, kz) points acquired of one\n" " echo image.\n" " * n - Total number of (ky, kz) points\n" " acquired. This is equal to the\n" " product of ntr and tf.\n\n" "Descriptions:\n" " * reorder is an (n by 3) index matrix such that\n" " [ky, kz, t] = reorder(i, :) represents the\n" " (ky, kz) kspace position of the readout line\n" " acquired at echo number (t), and 0 <= ky < sy,\n" " 0 <= kz < sz, 0 <= t < tf).\n" " * table is a (wx by nc by n) matrix such that\n" " table(:, :, k) represents the kth multichannel\n" " kspace line.\n\n" "Expected dimensions:\n" " * maps - ( sx, sy, sz, nc, md, 1, 1)\n" " * wave - ( wx, sy, sz, 1, 1, 1, 1)\n" " * phi - ( 1, 1, 1, 1, 1, tf, tk)\n" " * output - ( sx, sy, sz, 1, md, 1, tk)\n" " * reorder - ( n, 3, 1, 1, 1, 1, 1)\n" " * table - ( wx, nc, n, 1, 1, 1, 1)"; /* Helper function to print out operator dimensions. */ static void print_opdims(const struct linop_s* op) { const struct iovec_s* domain = linop_domain(op); const struct iovec_s* codomain = linop_codomain(op); debug_printf(DP_INFO, "\tDomain: "); debug_print_dims(DP_INFO, domain->N, domain->dims); debug_printf(DP_INFO, "\tCodomain: "); debug_print_dims(DP_INFO, codomain->N, codomain->dims); } /* Construct sampling mask array from reorder tables. 
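 *
 * For example, following the reorder convention in the help text above, a
 * row [ky kz t] = [3 7 2] sets mask[3 + 7 * sy + 2 * sy * sz] = 1, i.e. the
 * readout at (ky = 3, kz = 7) was acquired at echo 2.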
*/ static void construct_mask( long reorder_dims[DIMS], complex float* reorder, long mask_dims[DIMS], complex float* mask) { long n = reorder_dims[0]; long sy = mask_dims[1]; long sz = mask_dims[2]; long y = 0; long z = 0; long t = 0; for (int i = 0; i < n; i++) { y = lround(creal(reorder[i])); z = lround(creal(reorder[i + n])); t = lround(creal(reorder[i + 2 * n])); mask[(y + z * sy) + t * sy * sz] = 1; } } static DEF_TYPEID(kern_s); struct kern_s { INTERFACE(linop_data_t); unsigned int N; long* reorder_dims; // Dimension of the index table: ( n, 3, 1, 1, 1, 1, 1, 1) long* phi_dims; // Dimension of the temporal basis: ( 1, 1, 1, 1, 1, tf, tk, 1) long* table_dims; // Dimension of the data table: (wx, nc, n, 1, 1, 1, 1, 1) long* kernel_dims; // Dimension of the kernel: ( 1, sy, sz, 1, 1, 1, tk, tk) complex float* reorder; complex float* phi; complex float* kernel; complex float* gpu_kernel; }; /* Go to table from coefficient-kspace with memory efficiency. */ static void kern_apply(const linop_data_t* _data, complex float* dst, const complex float* src) { const struct kern_s* data = CAST_DOWN(kern_s, _data); long wx = data->table_dims[0]; long sy = data->kernel_dims[1]; long sz = data->kernel_dims[2]; long nc = data->table_dims[1]; long n = data->reorder_dims[0]; long tf = data->phi_dims[5]; long tk = data->phi_dims[6]; long input_dims[] = { [0 ... DIMS - 1] = 1 }; input_dims[0] = wx; input_dims[1] = sy; input_dims[2] = sz; input_dims[3] = nc; input_dims[6] = tk; long perm_dims[] = { [0 ... DIMS - 1] = 1 }; perm_dims[0] = wx; perm_dims[1] = nc; perm_dims[3] = tk; perm_dims[4] = sy; perm_dims[5] = sz; complex float* perm = md_alloc_sameplace(DIMS, perm_dims, CFL_SIZE, src); unsigned int permute_order[DIMS] = {0, 3, 5, 6, 1, 2, 4, 7}; for (unsigned int i = 8; i < DIMS; i++) permute_order[i] = i; md_permute(DIMS, permute_order, perm_dims, perm, input_dims, src, CFL_SIZE); long vec_dims[] = {wx, nc, tf, 1}; long phi_mat_dims[] = { 1, 1, tf, tk}; long phi_in_dims[] = {wx, nc, 1, tk}; long fmac_dims[] = {wx, nc, tf, tk}; long line_dims[] = {wx, nc, 1, 1}; complex float* vec = md_alloc_sameplace(4, vec_dims, CFL_SIZE, src); long vec_str[4]; md_calc_strides(4, vec_str, vec_dims, CFL_SIZE); long phi_mat_str[4]; md_calc_strides(4, phi_mat_str, phi_mat_dims, CFL_SIZE); long phi_in_str[4]; md_calc_strides(4, phi_in_str, phi_in_dims, CFL_SIZE); long fmac_str[4]; md_calc_strides(4, fmac_str, fmac_dims, CFL_SIZE); int y = -1; int z = -1; int t = -1; for (int i = 0; i < n; i ++) { y = lround(creal(data->reorder[i])); z = lround(creal(data->reorder[i + n])); t = lround(creal(data->reorder[i + 2 * n])); md_clear(4, vec_dims, vec, CFL_SIZE); md_zfmac2(4, fmac_dims, vec_str, vec, phi_in_str, (perm + ((wx * nc * tk) * (y + z * sy))), phi_mat_str, data->phi); md_copy(4, line_dims, dst + (i * wx * nc), vec + (t * wx * nc), CFL_SIZE); } md_free(perm); md_free(vec); } /* Collapse data table into the temporal basis for memory efficiency. */ static void kern_adjoint(const linop_data_t* _data, complex float* dst, const complex float* src) { struct kern_s* data = CAST_DOWN(kern_s, _data); long wx = data->table_dims[0]; long sy = data->kernel_dims[1]; long sz = data->kernel_dims[2]; long nc = data->table_dims[1]; long n = data->reorder_dims[0]; long tf = data->phi_dims[5]; long tk = data->phi_dims[6]; long perm_dims[] = { [0 ... 
DIMS - 1] = 1 }; perm_dims[0] = wx; perm_dims[1] = nc; perm_dims[3] = tk; perm_dims[4] = sy; perm_dims[5] = sz; complex float* perm = md_alloc_sameplace(DIMS, perm_dims, CFL_SIZE, dst); md_clear(DIMS, perm_dims, perm, CFL_SIZE); #ifdef _OPENMP long num_threads = omp_get_max_threads(); #else long num_threads = 1; #endif long vec_dims[] = {wx, nc, tf, 1}; long phi_mat_dims[] = { 1, 1, tf, tk}; long phi_out_dims[] = {wx, nc, 1, tk}; long fmac_dims[] = {wx, nc, tf, tk}; long line_dims[] = {wx, nc, 1, 1}; long vthrd_dims[] = {wx, nc, tf, 1, num_threads}; complex float* vec = md_calloc(5, vthrd_dims, CFL_SIZE); long vec_str[4]; md_calc_strides(4, vec_str, vec_dims, CFL_SIZE); long phi_mat_str[4]; md_calc_strides(4, phi_mat_str, phi_mat_dims, CFL_SIZE); long phi_out_str[4]; md_calc_strides(4, phi_out_str, phi_out_dims, CFL_SIZE); long fmac_str[4]; md_calc_strides(4, fmac_str, fmac_dims, CFL_SIZE); #pragma omp parallel for for (int k = 0; k < sy * sz; k ++) { #ifdef _OPENMP int tid = omp_get_thread_num(); #else int tid = 0; #endif int y = k % sy; int z = k / sy; int t = -1; md_clear(4, vec_dims, vec + (wx * nc * tf * tid), CFL_SIZE); for (int i = 0; i < n; i ++) { if ((y == lround(creal(data->reorder[i]))) && (z == lround(creal(data->reorder[i + n])))) { t = lround(creal(data->reorder[i + 2 * n])); md_copy(4, line_dims, (vec + (wx * nc * tf * tid) + t * wx * nc), (src + i * wx * nc), CFL_SIZE); } } md_zfmacc2(4, fmac_dims, phi_out_str, perm + (y + z * sy) * (wx * nc * tk), vec_str, vec + (wx * nc * tf * tid), phi_mat_str, data->phi); } long out_dims[] = { [0 ... DIMS - 1] = 1 }; out_dims[0] = wx; out_dims[1] = sy; out_dims[2] = sz; out_dims[3] = nc; out_dims[6] = tk; unsigned int permute_order[DIMS] = {0, 4, 5, 1, 6, 2, 3, 7}; for (unsigned int i = 8; i < DIMS; i++) permute_order[i] = i; md_permute(DIMS, permute_order, out_dims, dst, perm_dims, perm, CFL_SIZE); md_free(vec); md_free(perm); } static void kern_normal(const linop_data_t* _data, complex float* dst, const complex float* src) { const struct kern_s* data = CAST_DOWN(kern_s, _data); long wx = data->table_dims[0]; long sy = data->kernel_dims[1]; long sz = data->kernel_dims[2]; long nc = data->table_dims[1]; long tk = data->phi_dims[6]; long input_dims[DIMS] = { [0 ... DIMS - 1] = 1 }; input_dims[0] = wx; input_dims[1] = sy; input_dims[2] = sz; input_dims[3] = nc; input_dims[6] = tk; long input_str[DIMS]; md_calc_strides(DIMS, input_str, input_dims, CFL_SIZE); long output_dims[DIMS]; md_copy_dims(DIMS, output_dims, input_dims); output_dims[6] = 1; output_dims[7] = tk; long output_str[DIMS]; md_calc_strides(DIMS, output_str, output_dims, CFL_SIZE); long gpu_kernel_dims[DIMS] = { [0 ... 
DIMS - 1] = 1}; md_copy_dims(DIMS, gpu_kernel_dims, data->kernel_dims); gpu_kernel_dims[0] = wx; gpu_kernel_dims[3] = nc; long kernel_str[DIMS]; md_calc_strides(DIMS, kernel_str, data->kernel_dims, CFL_SIZE); long gpu_kernel_str[DIMS]; md_calc_strides(DIMS, gpu_kernel_str, gpu_kernel_dims, CFL_SIZE); long fmac_dims[DIMS]; md_merge_dims(DIMS, fmac_dims, input_dims, data->kernel_dims); md_clear(DIMS, output_dims, dst, CFL_SIZE); #ifdef USE_CUDA if(cuda_ondevice(src)) md_zfmac2(DIMS, fmac_dims, output_str, dst, input_str, src, gpu_kernel_str, data->gpu_kernel); else #endif md_zfmac2(DIMS, fmac_dims, output_str, dst, input_str, src, kernel_str, data->kernel); } static void kern_free(const linop_data_t* _data) { const struct kern_s* data = CAST_DOWN(kern_s, _data); xfree(data->reorder_dims); xfree(data->phi_dims); xfree(data->table_dims); xfree(data->kernel_dims); #ifdef USE_CUDA if (data->gpu_kernel != NULL) md_free(data->gpu_kernel); #endif xfree(data); } static const struct linop_s* linop_kern_create(bool gpu_flag, const long _reorder_dims[DIMS], complex float* reorder, const long _phi_dims[DIMS], complex float* phi, const long _kernel_dims[DIMS], complex float* kernel, const long _table_dims[DIMS]) { PTR_ALLOC(struct kern_s, data); SET_TYPEID(kern_s, data); data->N = DIMS; PTR_ALLOC(long[DIMS], reorder_dims); PTR_ALLOC(long[DIMS], phi_dims); PTR_ALLOC(long[DIMS], table_dims); PTR_ALLOC(long[DIMS], kernel_dims); md_copy_dims(DIMS, *reorder_dims, _reorder_dims); md_copy_dims(DIMS, *phi_dims, _phi_dims); md_copy_dims(DIMS, *table_dims, _table_dims); md_copy_dims(DIMS, *kernel_dims, _kernel_dims); data->reorder_dims = *PTR_PASS(reorder_dims); data->phi_dims = *PTR_PASS(phi_dims); data->table_dims = *PTR_PASS(table_dims); data->kernel_dims = *PTR_PASS(kernel_dims); data->reorder = reorder; data->phi = phi; data->kernel = kernel; data->gpu_kernel = NULL; #ifdef USE_CUDA if(gpu_flag) { long repmat_kernel_dims[DIMS] = { [0 ... DIMS - 1] = 1}; md_copy_dims(DIMS, repmat_kernel_dims, _kernel_dims); repmat_kernel_dims[0] = _table_dims[0]; repmat_kernel_dims[3] = _table_dims[1]; long kernel_strs[DIMS]; long repmat_kernel_strs[DIMS]; md_calc_strides(DIMS, kernel_strs, _kernel_dims, CFL_SIZE); md_calc_strides(DIMS, repmat_kernel_strs, repmat_kernel_dims, CFL_SIZE); complex float* repmat_kernel = md_calloc(DIMS, repmat_kernel_dims, CFL_SIZE); md_copy2(DIMS, repmat_kernel_dims, repmat_kernel_strs, repmat_kernel, kernel_strs, kernel, CFL_SIZE); data->gpu_kernel = md_gpu_move(DIMS, repmat_kernel_dims, repmat_kernel, CFL_SIZE); md_free(repmat_kernel); } #else UNUSED(gpu_flag); #endif long input_dims[DIMS] = { [0 ... DIMS - 1] = 1 }; input_dims[0] = _table_dims[0]; input_dims[1] = _kernel_dims[1]; input_dims[2] = _kernel_dims[2]; input_dims[3] = _table_dims[1]; input_dims[6] = _phi_dims[6]; long output_dims[DIMS] = { [0 ... DIMS - 1] = 1 }; output_dims[0] = _table_dims[0]; output_dims[1] = _table_dims[1]; output_dims[2] = _reorder_dims[0]; const struct linop_s* K = linop_create(DIMS, output_dims, DIMS, input_dims, CAST_UP(PTR_PASS(data)), kern_apply, kern_adjoint, kern_normal, NULL, kern_free); return K; } /* ESPIRiT operator. */ static const struct linop_s* linop_espirit_create(long sx, long sy, long sz, long nc, long md, long tk, complex float* maps) { long max_dims[] = { [0 ... 
DIMS - 1] = 1}; max_dims[0] = sx; max_dims[1] = sy; max_dims[2] = sz; max_dims[3] = nc; max_dims[4] = md; max_dims[6] = tk; const struct linop_s* E = linop_fmac_create(DIMS, max_dims, MAPS_FLAG, COIL_FLAG, TE_FLAG|COEFF_FLAG, maps); return E; } /* Resize operator. */ static const struct linop_s* linop_reshape_create(long wx, long sx, long sy, long sz, long nc, long tk) { long input_dims[] = { [0 ... DIMS - 1] = 1}; input_dims[0] = sx; input_dims[1] = sy; input_dims[2] = sz; input_dims[3] = nc; input_dims[6] = tk; long output_dims[DIMS]; md_copy_dims(DIMS, output_dims, input_dims); output_dims[0] = wx; struct linop_s* R = linop_resize_create(DIMS, output_dims, input_dims); return R; } /* Fx operator. */ static const struct linop_s* linop_fx_create(long wx, long sy, long sz, long nc, long tk) { long dims[] = { [0 ... DIMS - 1] = 1}; dims[0] = wx; dims[1] = sy; dims[2] = sz; dims[3] = nc; dims[6] = tk; struct linop_s* Fx = linop_fft_create(DIMS, dims, READ_FLAG); return Fx; } /* Wave operator. */ static const struct linop_s* linop_wave_create(long wx, long sy, long sz, long nc, long tk, complex float* psf) { long dims[] = { [0 ... DIMS - 1] = 1}; dims[0] = wx; dims[1] = sy; dims[2] = sz; dims[3] = nc; dims[6] = tk; struct linop_s* W = linop_cdiag_create(DIMS, dims, FFT_FLAGS, psf); return W; } /* Fyz operator. */ static const struct linop_s* linop_fyz_create(long wx, long sy, long sz, long nc, long tk) { long dims[] = { [0 ... DIMS - 1] = 1}; dims[0] = wx; dims[1] = sy; dims[2] = sz; dims[3] = nc; dims[6] = tk; struct linop_s* Fyz = linop_fft_create(DIMS, dims, PHS1_FLAG|PHS2_FLAG); return Fyz; } /* Construction sampling temporal kernel.*/ static void construct_kernel( long mask_dims[DIMS], complex float* mask, long phi_dims[DIMS], complex float* phi, long kern_dims[DIMS], complex float* kern) { long sy = mask_dims[1]; long sz = mask_dims[2]; long tf = phi_dims[5]; long tk = phi_dims[6]; long cvec_dims[] = { [0 ... DIMS - 1] = 1 }; cvec_dims[6] = tk; long cvec_str[DIMS]; md_calc_strides(DIMS, cvec_str, cvec_dims, CFL_SIZE); complex float cvec[tk]; long tvec_dims[] = { [0 ... DIMS - 1] = 1 }; tvec_dims[5] = tf; long tvec_str[DIMS]; md_calc_strides(DIMS, tvec_str, tvec_dims, CFL_SIZE); complex float mvec[tf]; complex float tvec1[tf]; complex float tvec2[tf]; long phi_str[DIMS]; md_calc_strides(DIMS, phi_str, phi_dims, CFL_SIZE); long out_dims[] = { [0 ... 
DIMS - 1] = 1 }; out_dims[0] = tk; out_dims[1] = sy; out_dims[2] = sz; out_dims[3] = tk; complex float* out = md_calloc(DIMS, out_dims, CFL_SIZE); for (int y = 0; y < sy; y ++) { for (int z = 0; z < sz; z ++) { for (int t = 0; t < tf; t ++) mvec[t] = mask[(y + sy * z) + (sy * sz) * t]; for (int t = 0; t < tk; t ++) { cvec[t] = 1; md_clear(DIMS, tvec_dims, tvec1, CFL_SIZE); md_zfmac2(DIMS, phi_dims, tvec_str, tvec1, cvec_str, cvec, phi_str, phi); md_clear(DIMS, tvec_dims, tvec2, CFL_SIZE); md_zfmac2(DIMS, tvec_dims, tvec_str, tvec2, tvec_str, tvec1, tvec_str, mvec); md_clear(DIMS, cvec_dims, out + y * tk + z * sy * tk + t * sy * sz * tk, CFL_SIZE); md_zfmacc2(DIMS, phi_dims, cvec_str, out + y * tk + z * sy * tk + t * sy * sz * tk, tvec_str, tvec2, phi_str, phi); cvec[t] = 0; } } } unsigned int permute_order[DIMS] = {4, 1, 2, 5, 6, 7, 3, 0}; for (unsigned int i = 8; i < DIMS; i++) permute_order[i] = i; md_permute(DIMS, permute_order, kern_dims, kern, out_dims, out, CFL_SIZE); md_free(out); } static void fftmod_apply(long sy, long sz, long reorder_dims[DIMS], complex float* reorder, long table_dims[DIMS], complex float* table, long maps_dims[DIMS], complex float* maps) { long wx = table_dims[0]; long nc = table_dims[1]; fftmod(DIMS, table_dims, READ_FLAG, table, table); fftmod(DIMS, maps_dims, FFT_FLAGS, maps, maps); long y = -1; long z = -1; double dy = ((double) sy/2)/((double) sy); double dz = ((double) sz/2)/((double) sz); complex float py = 1; complex float pz = 1; long dims[] = { [0 ... DIMS] = 1}; dims[0] = wx; dims[1] = nc; long n = reorder_dims[0]; for (long k = 0; k < n; k++) { y = lround(creal(reorder[k])); z = lround(creal(reorder[k + n])); py = cexp(2.i * M_PI * dy * y); pz = cexp(2.i * M_PI * dz * z); md_zsmul(DIMS, dims, table + k * wx * nc, table + k * wx * nc, py * pz); } } enum algo_t { CG, IST, FISTA }; int main_wshfl(int argc, char* argv[]) { double start_time = timestamp(); float lambda = 1E-5; int maxiter = 300; int blksize = 8; float step = 0.5; float tol = 1.E-3; bool llr = false; bool wav = false; bool fista = false; bool hgwld = false; float cont = 1; float eval = -1; const char* fwd = NULL; int gpun = -1; bool rvc = false; const struct opt_s opts[] = { OPT_FLOAT( 'r', &lambda, "lambda", "Soft threshold lambda for wavelet or locally low rank."), OPT_INT( 'b', &blksize, "blkdim", "Block size for locally low rank."), OPT_INT( 'i', &maxiter, "mxiter", "Maximum number of iterations."), OPT_FLOAT( 's', &step, "stepsz", "Step size for iterative method."), OPT_FLOAT( 'c', &cont, "cntnu", "Continuation value for IST/FISTA."), OPT_FLOAT( 't', &tol, "toler", "Tolerance convergence condition for iterative method."), OPT_FLOAT( 'e', &eval, "eigvl", "Maximum eigenvalue of normal operator, if known."), OPT_STRING('F', &fwd, "frwrd", "Go from shfl-coeffs to data-table. Pass in coeffs path."), OPT_INT( 'g', &gpun, "gpunm", "GPU device number."), OPT_SET( 'f', &fista, "Reconstruct using FISTA instead of IST."), OPT_SET( 'H', &hgwld, "Use hogwild in IST/FISTA."), OPT_SET( 'w', &wav, "Use wavelet."), OPT_SET( 'l', &llr, "Use locally low rank."), OPT_SET( 'v', &rvc, "Apply real valued constraint on coefficients."), }; cmdline(&argc, argv, 6, 6, usage_str, help_str, ARRAY_SIZE(opts), opts); debug_printf(DP_INFO, "Loading data... 
"); long maps_dims[DIMS]; complex float* maps = load_cfl(argv[1], DIMS, maps_dims); long wave_dims[DIMS]; complex float* wave = load_cfl(argv[2], DIMS, wave_dims); long phi_dims[DIMS]; complex float* phi = load_cfl(argv[3], DIMS, phi_dims); long reorder_dims[DIMS]; complex float* reorder = load_cfl(argv[4], DIMS, reorder_dims); long table_dims[DIMS]; complex float* table = load_cfl(argv[5], DIMS, table_dims); debug_printf(DP_INFO, "Done.\n"); if (gpun >= 0) num_init_gpu_device(gpun); else num_init(); int wx = wave_dims[0]; int sx = maps_dims[0]; int sy = maps_dims[1]; int sz = maps_dims[2]; int nc = maps_dims[3]; int md = maps_dims[4]; int tf = phi_dims[5]; int tk = phi_dims[6]; debug_printf(DP_INFO, "Constructing sampling mask from reorder table... "); long mask_dims[] = { [0 ... DIMS - 1] = 1 }; mask_dims[1] = sy; mask_dims[2] = sz; mask_dims[5] = tf; complex float* mask = md_calloc(DIMS, mask_dims, CFL_SIZE); construct_mask(reorder_dims, reorder, mask_dims, mask); debug_printf(DP_INFO, "Done.\n"); debug_printf(DP_INFO, "Constructing sampling-temporal kernel... "); long kernel_dims[] = { [0 ... DIMS - 1] = 1 }; kernel_dims[1] = sy; kernel_dims[2] = sz; kernel_dims[6] = tk; kernel_dims[7] = tk; complex float* kernel = md_calloc(DIMS, kernel_dims, CFL_SIZE); construct_kernel(mask_dims, mask, phi_dims, phi, kernel_dims, kernel); md_free(mask); debug_printf(DP_INFO, "Done.\n"); long coeff_dims[] = { [0 ... DIMS - 1] = 1 }; coeff_dims[0] = sx; coeff_dims[1] = sy; coeff_dims[2] = sz; coeff_dims[4] = md; coeff_dims[6] = tk; debug_printf(DP_INFO, "Linear operator.\n"); const struct linop_s* E = linop_espirit_create(sx, sy, sz, nc, md, tk, maps); const struct linop_s* R = linop_reshape_create(wx, sx, sy, sz, nc, tk); const struct linop_s* Fx = linop_fx_create(wx, sy, sz, nc, tk); const struct linop_s* W = linop_wave_create(wx, sy, sz, nc, tk, wave); const struct linop_s* Fyz = linop_fyz_create(wx, sy, sz, nc, tk); const struct linop_s* K = linop_kern_create(gpun >= 0, reorder_dims, reorder, phi_dims, phi, kernel_dims, kernel, table_dims); struct linop_s* A = linop_chain(linop_chain(linop_chain(linop_chain(linop_chain( E, R), Fx), W), Fyz), K); if (rvc == true) { debug_printf(DP_INFO, "\tDomain is restricted to real numbers.\n"); struct linop_s* tmp = A; struct linop_s* rvcop = linop_realval_create(DIMS, linop_domain(A)->dims); A = linop_chain(rvcop, tmp); linop_free(rvcop); linop_free(tmp); } linop_free(E); linop_free(R); linop_free(Fx); linop_free(W); linop_free(Fyz); linop_free(K); print_opdims(A); if (fwd != NULL) { debug_printf(DP_INFO, "Going from coefficients to data table... "); complex float* coeffs_to_fwd = load_cfl(fwd, DIMS, coeff_dims); complex float* table_forward = create_cfl(argv[6], DIMS, table_dims); operator_apply(A->forward, DIMS, table_dims, table_forward, DIMS, coeff_dims, coeffs_to_fwd); unmap_cfl(DIMS, table_dims, table_forward); debug_printf(DP_INFO, "Done. Output table not normalized and not centered for fft.\n"); return 0; } if (eval < 0) #ifdef USE_CUDA eval = (gpun >= 0) ? estimate_maxeigenval_gpu(A->normal) : estimate_maxeigenval(A->normal); #else eval = estimate_maxeigenval(A->normal); #endif debug_printf(DP_INFO, "\tMax eval: %.2e\n", eval); step /= eval; debug_printf(DP_INFO, "Normalizing data table and applying fftmod to table... "); float norm = md_znorm(DIMS, table_dims, table); md_zsmul(DIMS, table_dims, table, table, 1. 
/ norm); fftmod_apply(sy, sz, reorder_dims, reorder, table_dims, table, maps_dims, maps); debug_printf(DP_INFO, "Done.\n"); const struct operator_p_s* T = NULL; long blkdims[MAX_LEV][DIMS]; long minsize[] = { [0 ... DIMS - 1] = 1 }; minsize[0] = MIN(sx, 16); minsize[1] = MIN(sy, 16); minsize[2] = MIN(sz, 16); unsigned int WAVFLAG = (sx > 1) * READ_FLAG | (sy > 1) * PHS1_FLAG | (sz > 2) * PHS2_FLAG; enum algo_t algo = CG; if ((wav == true) || (llr == true)) { algo = (fista) ? FISTA : IST; if (wav) { debug_printf(DP_INFO, "Creating wavelet threshold operator... "); T = prox_wavelet_thresh_create(DIMS, coeff_dims, WAVFLAG, 0u, minsize, lambda, true); } else { debug_printf(DP_INFO, "Creating locally low rank threshold operator... "); llr_blkdims(blkdims, ~COEFF_DIM, coeff_dims, blksize); T = lrthresh_create(coeff_dims, true, ~COEFF_FLAG, (const long (*)[])blkdims, lambda, false, false); } debug_printf(DP_INFO, "Done.\n"); } italgo_fun2_t italgo = iter2_call_iter; struct iter_call_s iter2_data; SET_TYPEID(iter_call_s, &iter2_data); iter_conf* iconf = CAST_UP(&iter2_data); struct iter_conjgrad_conf cgconf = iter_conjgrad_defaults; struct iter_fista_conf fsconf = iter_fista_defaults; struct iter_ist_conf isconf = iter_ist_defaults; switch(algo) { case IST: debug_printf(DP_INFO, "Using IST.\n"); debug_printf(DP_INFO, "\tLambda: %0.2e\n", lambda); debug_printf(DP_INFO, "\tMaximum iterations: %d\n", maxiter); debug_printf(DP_INFO, "\tStep size: %0.2e\n", step); debug_printf(DP_INFO, "\tHogwild: %d\n", (int) hgwld); debug_printf(DP_INFO, "\tTolerance: %0.2e\n", tol); debug_printf(DP_INFO, "\tContinuation: %0.2e\n", cont); isconf = iter_ist_defaults; isconf.step = step; isconf.maxiter = maxiter; isconf.tol = tol; isconf.continuation = cont; isconf.hogwild = hgwld; iter2_data.fun = iter_ist; iter2_data._conf = CAST_UP(&isconf); break; case FISTA: debug_printf(DP_INFO, "Using FISTA.\n"); debug_printf(DP_INFO, "\tLambda: %0.2e\n", lambda); debug_printf(DP_INFO, "\tMaximum iterations: %d\n", maxiter); debug_printf(DP_INFO, "\tStep size: %0.2e\n", step); debug_printf(DP_INFO, "\tHogwild: %d\n", (int) hgwld); debug_printf(DP_INFO, "\tTolerance: %0.2e\n", tol); debug_printf(DP_INFO, "\tContinuation: %0.2e\n", cont); fsconf = iter_fista_defaults; fsconf.maxiter = maxiter; fsconf.step = step; fsconf.hogwild = hgwld; fsconf.tol = tol; fsconf.continuation = cont; iter2_data.fun = iter_fista; iter2_data._conf = CAST_UP(&fsconf); break; default: case CG: debug_printf(DP_INFO, "Using CG.\n"); debug_printf(DP_INFO, "\tMaximum iterations: %d\n", maxiter); debug_printf(DP_INFO, "\tTolerance: %0.2e\n", tol); cgconf = iter_conjgrad_defaults; cgconf.maxiter = maxiter; cgconf.l2lambda = 0; cgconf.tol = tol; iter2_data.fun = iter_conjgrad; iter2_data._conf = CAST_UP(&cgconf); break; } debug_printf(DP_INFO, "Reconstruction... "); complex float* recon = create_cfl(argv[6], DIMS, coeff_dims); struct lsqr_conf lsqr_conf = { 0., gpun >= 0 }; double recon_start = timestamp(); const struct operator_s* J = lsqr2_create(&lsqr_conf, italgo, iconf, NULL, A, NULL, 1, &T, NULL, NULL); operator_apply(J, DIMS, coeff_dims, recon, DIMS, table_dims, table); double recon_end = timestamp(); debug_printf(DP_INFO, "Done.\nReconstruction time: %f seconds.\n", recon_end - recon_start); debug_printf(DP_INFO, "Cleaning up and saving result... 
"); operator_free(J); linop_free(A); md_free(kernel); unmap_cfl(DIMS, maps_dims, maps); unmap_cfl(DIMS, wave_dims, wave); unmap_cfl(DIMS, phi_dims, phi); unmap_cfl(DIMS, reorder_dims, reorder); unmap_cfl(DIMS, table_dims, table); unmap_cfl(DIMS, coeff_dims, recon); debug_printf(DP_INFO, "Done.\n"); double end_time = timestamp(); debug_printf(DP_INFO, "Total time: %f seconds.\n", end_time - start_time); return 0; }
gbdt.h
/*! * Copyright (c) 2016 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_BOOSTING_GBDT_H_ #define LIGHTGBM_BOOSTING_GBDT_H_ #include <string> #include <algorithm> #include <cstdio> #include <fstream> #include <map> #include <memory> #include <mutex> #include <unordered_map> #include <utility> #include <vector> #include <LightGBM/boosting.h> #include <LightGBM/objective_function.h> #include <LightGBM/prediction_early_stop.h> #include <LightGBM/utils/json11.h> #include <LightGBM/utils/threading.h> #include "score_updater.hpp" namespace LightGBM { using json11::Json; /*! * \brief GBDT algorithm implementation. including Training, prediction, bagging. */ class GBDT : public GBDTBase { public: /*! * \brief Constructor */ GBDT(); /*! * \brief Destructor */ ~GBDT(); /*! * \brief Initialization logic * \param gbdt_config Config for boosting * \param train_data Training data * \param objective_function Training objective function * \param training_metrics Training metrics */ void Init(const Config* gbdt_config, const Dataset* train_data, const ObjectiveFunction* objective_function, const std::vector<const Metric*>& training_metrics) override; /*! * \brief Merge model from other boosting object. Will insert to the front of current boosting object * \param other */ void MergeFrom(const Boosting* other) override { auto other_gbdt = reinterpret_cast<const GBDT*>(other); // tmp move to other vector auto original_models = std::move(models_); models_ = std::vector<std::unique_ptr<Tree>>(); // push model from other first for (const auto& tree : other_gbdt->models_) { auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get()))); models_.push_back(std::move(new_tree)); } num_init_iteration_ = static_cast<int>(models_.size()) / num_tree_per_iteration_; // push model in current object for (const auto& tree : original_models) { auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get()))); models_.push_back(std::move(new_tree)); } num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_; } void ShuffleModels(int start_iter, int end_iter) override { int total_iter = static_cast<int>(models_.size()) / num_tree_per_iteration_; start_iter = std::max(0, start_iter); if (end_iter <= 0) { end_iter = total_iter; } end_iter = std::min(total_iter, end_iter); auto original_models = std::move(models_); std::vector<int> indices(total_iter); for (int i = 0; i < total_iter; ++i) { indices[i] = i; } Random tmp_rand(17); for (int i = start_iter; i < end_iter - 1; ++i) { int j = tmp_rand.NextShort(i + 1, end_iter); std::swap(indices[i], indices[j]); } models_ = std::vector<std::unique_ptr<Tree>>(); for (int i = 0; i < total_iter; ++i) { for (int j = 0; j < num_tree_per_iteration_; ++j) { int tree_idx = indices[i] * num_tree_per_iteration_ + j; auto new_tree = std::unique_ptr<Tree>(new Tree(*(original_models[tree_idx].get()))); models_.push_back(std::move(new_tree)); } } } /*! * \brief Reset the training data * \param train_data New Training data * \param objective_function Training objective function * \param training_metrics Training metrics */ void ResetTrainingData(const Dataset* train_data, const ObjectiveFunction* objective_function, const std::vector<const Metric*>& training_metrics) override; /*! * \brief Reset Boosting Config * \param gbdt_config Config for boosting */ void ResetConfig(const Config* gbdt_config) override; /*! 
* \brief Add a validation dataset * \param valid_data Validation dataset * \param valid_metrics Metrics for validation dataset */ void AddValidDataset(const Dataset* valid_data, const std::vector<const Metric*>& valid_metrics) override; /*! * \brief Perform a full training procedure * \param snapshot_freq frequency of snapshots * \param model_output_path path of model file */ void Train(int snapshot_freq, const std::string& model_output_path) override; void RefitTree(const std::vector<std::vector<int>>& tree_leaf_prediction) override; /*! * \brief Training logic * \param gradients nullptr for using default objective, otherwise use self-defined boosting * \param hessians nullptr for using default objective, otherwise use self-defined boosting * \return True if cannot train any more */ bool TrainOneIter(const score_t* gradients, const score_t* hessians) override; /*! * \brief Rollback one iteration */ void RollbackOneIter() override; /*! * \brief Get current iteration */ int GetCurrentIteration() const override { return static_cast<int>(models_.size()) / num_tree_per_iteration_; } /*! * \brief Can use early stopping for prediction or not * \return True if cannot use early stopping for prediction */ bool NeedAccuratePrediction() const override { if (objective_function_ == nullptr) { return true; } else { return objective_function_->NeedAccuratePrediction(); } } /*! * \brief Get evaluation result at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \return evaluation result */ std::vector<double> GetEvalAt(int data_idx) const override; /*! * \brief Get current training score * \param out_len length of returned score * \return training score */ const double* GetTrainingScore(int64_t* out_len) override; /*! * \brief Get size of prediction at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \return The size of prediction */ int64_t GetNumPredictAt(int data_idx) const override { CHECK(data_idx >= 0 && data_idx <= static_cast<int>(valid_score_updater_.size())); data_size_t num_data = train_data_->num_data(); if (data_idx > 0) { num_data = valid_score_updater_[data_idx - 1]->num_data(); } return num_data * num_class_; } /*! * \brief Get prediction result at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \param result used to store prediction result, memory should be allocated before calling this function * \param out_len length of returned score */ void GetPredictAt(int data_idx, double* out_result, int64_t* out_len) override; /*! 
* \brief Get number of prediction for one data * \param num_iteration number of used iterations * \param is_pred_leaf True if predicting leaf index * \param is_pred_contrib True if predicting feature contribution * \return number of prediction */ inline int NumPredictOneRow(int num_iteration, bool is_pred_leaf, bool is_pred_contrib) const override { int num_preb_in_one_row = num_class_; if (is_pred_leaf) { int max_iteration = GetCurrentIteration(); if (num_iteration > 0) { num_preb_in_one_row *= static_cast<int>(std::min(max_iteration, num_iteration)); } else { num_preb_in_one_row *= max_iteration; } } else if (is_pred_contrib) { num_preb_in_one_row = num_tree_per_iteration_ * (max_feature_idx_ + 2); // +1 for 0-based indexing, +1 for baseline } return num_preb_in_one_row; } void PredictRaw(const double* features, double* output, const PredictionEarlyStopInstance* earlyStop) const override; void PredictRawByMap(const std::unordered_map<int, double>& features, double* output, const PredictionEarlyStopInstance* early_stop) const override; void Predict(const double* features, double* output, const PredictionEarlyStopInstance* earlyStop) const override; void PredictByMap(const std::unordered_map<int, double>& features, double* output, const PredictionEarlyStopInstance* early_stop) const override; void PredictLeafIndex(const double* features, double* output) const override; void PredictLeafIndexByMap(const std::unordered_map<int, double>& features, double* output) const override; void PredictContrib(const double* features, double* output, const PredictionEarlyStopInstance* earlyStop) const override; /*! * \brief Dump model to json format string * \param start_iteration The model will be saved start from * \param num_iteration Number of iterations that want to dump, -1 means dump all * \return Json format string of model */ std::string DumpModel(int start_iteration, int num_iteration) const override; /*! * \brief Translate model to if-else statement * \param num_iteration Number of iterations that want to translate, -1 means translate all * \return if-else format codes of model */ std::string ModelToIfElse(int num_iteration) const override; /*! * \brief Translate model to if-else statement * \param num_iteration Number of iterations that want to translate, -1 means translate all * \param filename Filename that want to save to * \return is_finish Is training finished or not */ bool SaveModelToIfElse(int num_iteration, const char* filename) const override; /*! * \brief Save model to file * \param start_iteration The model will be saved start from * \param num_iterations Number of model that want to save, -1 means save all * \param filename Filename that want to save to * \return is_finish Is training finished or not */ bool SaveModelToFile(int start_iteration, int num_iterations, const char* filename) const override; /*! * \brief Save model to string * \param start_iteration The model will be saved start from * \param num_iterations Number of model that want to save, -1 means save all * \return Non-empty string if succeeded */ std::string SaveModelToString(int start_iteration, int num_iterations) const override; /*! * \brief Restore from a serialized buffer */ bool LoadModelFromString(const char* buffer, size_t len) override; /*! 
* \brief Calculate feature importances * \param num_iteration Number of models to use for feature importance, -1 means use all * \param importance_type: 0 for split, 1 for gain * \return vector of feature_importance */ std::vector<double> FeatureImportance(int num_iteration, int importance_type) const override; /*! * \brief Calculate upper bound value * \return upper bound value */ double GetUpperBoundValue() const override; /*! * \brief Calculate lower bound value * \return lower bound value */ double GetLowerBoundValue() const override; /*! * \brief Get max feature index of this model * \return Max feature index of this model */ inline int MaxFeatureIdx() const override { return max_feature_idx_; } /*! * \brief Get feature names of this model * \return Feature names of this model */ inline std::vector<std::string> FeatureNames() const override { return feature_names_; } /*! * \brief Get index of label column * \return index of label column */ inline int LabelIdx() const override { return label_idx_; } /*! * \brief Get number of weak sub-models * \return Number of weak sub-models */ inline int NumberOfTotalModel() const override { return static_cast<int>(models_.size()); } /*! * \brief Get number of tree per iteration * \return number of tree per iteration */ inline int NumModelPerIteration() const override { return num_tree_per_iteration_; } /*! * \brief Get number of classes * \return Number of classes */ inline int NumberOfClasses() const override { return num_class_; } inline void InitPredict(int num_iteration, bool is_pred_contrib) override { num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_; if (num_iteration > 0) { num_iteration_for_pred_ = std::min(num_iteration, num_iteration_for_pred_); } if (is_pred_contrib) { #pragma omp parallel for schedule(static) for (int i = 0; i < static_cast<int>(models_.size()); ++i) { models_[i]->RecomputeMaxDepth(); } } } inline double GetLeafValue(int tree_idx, int leaf_idx) const override { CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size()); CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves()); return models_[tree_idx]->LeafOutput(leaf_idx); } inline void SetLeafValue(int tree_idx, int leaf_idx, double val) override { CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size()); CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves()); models_[tree_idx]->SetLeafOutput(leaf_idx, val); } /*! * \brief Get Type name of this boosting object */ const char* SubModelName() const override { return "tree"; } protected: virtual bool GetIsConstHessian(const ObjectiveFunction* objective_function) { if (objective_function != nullptr) { return objective_function->IsConstantHessian(); } else { return false; } } /*! * \brief Print eval result and check early stopping */ virtual bool EvalAndCheckEarlyStopping(); /*! * \brief Reset config for bagging */ void ResetBaggingConfig(const Config* config, bool is_change_dataset); /*! * \brief Implement bagging logic * \param iter Current iteration */ virtual void Bagging(int iter); virtual data_size_t BaggingHelper(data_size_t start, data_size_t cnt, data_size_t* buffer); data_size_t BalancedBaggingHelper(data_size_t start, data_size_t cnt, data_size_t* buffer); /*! * \brief Calculate the objective function */ virtual void Boosting(); /*! 
* \brief Update the score after a tree was trained * \param tree Trained tree of this iteration * \param cur_tree_id Current tree for multiclass training */ virtual void UpdateScore(const Tree* tree, const int cur_tree_id); /*! * \brief Eval results for one metric */ virtual std::vector<double> EvalOneMetric(const Metric* metric, const double* score) const; /*! * \brief Print metric result of current iteration * \param iter Current iteration * \return best_msg if met early_stopping */ std::string OutputMetric(int iter); double BoostFromAverage(int class_id, bool update_scorer); /*! \brief current iteration */ int iter_; /*! \brief Pointer to training data */ const Dataset* train_data_; /*! \brief Config of gbdt */ std::unique_ptr<Config> config_; /*! \brief Tree learner, will use this class to learn trees */ std::unique_ptr<TreeLearner> tree_learner_; /*! \brief Objective function */ const ObjectiveFunction* objective_function_; /*! \brief Store and update training data's score */ std::unique_ptr<ScoreUpdater> train_score_updater_; /*! \brief Metrics for training data */ std::vector<const Metric*> training_metrics_; /*! \brief Store and update validation data's scores */ std::vector<std::unique_ptr<ScoreUpdater>> valid_score_updater_; /*! \brief Metrics for validation data */ std::vector<std::vector<const Metric*>> valid_metrics_; /*! \brief Number of rounds for early stopping */ int early_stopping_round_; /*! \brief Only use first metric for early stopping */ bool es_first_metric_only_; /*! \brief Best iteration(s) for early stopping */ std::vector<std::vector<int>> best_iter_; /*! \brief Best score(s) for early stopping */ std::vector<std::vector<double>> best_score_; /*! \brief output message of best iteration */ std::vector<std::vector<std::string>> best_msg_; /*! \brief Trained models(trees) */ std::vector<std::unique_ptr<Tree>> models_; /*! \brief Max feature index of training data*/ int max_feature_idx_; /*! \brief First order derivative of training data */ std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> gradients_; /*! \brief Second order derivative of training data */ std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> hessians_; /*! \brief Store the indices of in-bag data */ std::vector<data_size_t, Common::AlignmentAllocator<data_size_t, kAlignedSize>> bag_data_indices_; /*! \brief Number of in-bag data */ data_size_t bag_data_cnt_; /*! \brief Number of training data */ data_size_t num_data_; /*! \brief Number of trees per iteration */ int num_tree_per_iteration_; /*! \brief Number of classes */ int num_class_; /*! \brief Index of label column */ data_size_t label_idx_; /*! \brief Number of used models */ int num_iteration_for_pred_; /*! \brief Shrinkage rate for one iteration */ double shrinkage_rate_; /*! \brief Number of loaded initial models */ int num_init_iteration_; /*! \brief Feature names */ std::vector<std::string> feature_names_; std::vector<std::string> feature_infos_; std::unique_ptr<Dataset> tmp_subset_; bool is_use_subset_; std::vector<bool> class_need_train_; bool is_constant_hessian_; std::unique_ptr<ObjectiveFunction> loaded_objective_; bool average_output_; bool need_re_bagging_; bool balanced_bagging_; std::string loaded_parameter_; std::vector<int8_t> monotone_constraints_; const int bagging_rand_block_ = 1024; std::vector<Random> bagging_rands_; ParallelPartitionRunner<data_size_t, false> bagging_runner_; Json forced_splits_json_; }; } // namespace LightGBM #endif // LightGBM_BOOSTING_GBDT_H_
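A detail worth making explicit from the header above: models_ stores trees flat, num_tree_per_iteration_ trees per boosting round (one per class in multiclass training), which is why GetCurrentIteration(), MergeFrom() and ShuffleModels() all divide or multiply by that constant. A toy illustration of the index mapping (not LightGBM code):

#include <cassert>
#include <cstddef>

// Flat storage: round i, per-round tree j  ->  models_[i * k + j],
// where k = num_tree_per_iteration_.
struct TreeIndexing {
    int trees_per_iteration;
    int Flat(int iteration, int tree_in_iter) const {
        return iteration * trees_per_iteration + tree_in_iter;
    }
    int Iterations(std::size_t total_models) const {
        return static_cast<int>(total_models) / trees_per_iteration;
    }
};

int main() {
    TreeIndexing idx{3};               // 3-class training: 3 trees per round
    assert(idx.Flat(5, 2) == 17);      // 6th round, 3rd class tree
    assert(idx.Iterations(18) == 6);   // 18 stored trees -> 6 completed rounds
    return 0;
}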
main.c
#include "common.h" static void print_help(char *argv) { END("%s [-f edge_file] [-W width] [-H height] [-D degree] [-R length] [-o output_file] [-s random_seed]\ [-n calculations] [-w max_temperature] [-c min_temperature] [-g groups] [-C cooling_cycle] [-B] [-d]\ [-F fixed_temperature] [-Y] [-M] [-h]\n", argv); } static void set_args(const int argc, char **argv, char *infname, int *low_length, char *outfname, int *random_seed, long long *ncalcs, double *max_temp, double *min_temp, int *groups, int *cooling_cycle, bool *enable_hill_climbing, bool *enable_detect_temp, bool *enable_bfs, bool *enable_halfway, double *fixed_temp, int *width, int *height, int *max_degree) { if(argc < 3) print_help(argv[0]); int result; while((result = getopt(argc,argv,"f:W:H:D:R:o:s:n:w:c:g:C:BdF:YMh"))!=-1){ switch(result){ case 'f': if(strlen(optarg) > MAX_FILENAME_LENGTH) ERROR("Input filename is long (%s). Please change MAX_FILENAME_LENGTH.\n", optarg); strcpy(infname, optarg); break; case 'W': *width = atoi(optarg); if(*width <= 0) ERROR("-W value > 0\n"); break; case 'H': *height = atoi(optarg); if(*height <= 0) ERROR("-H value > 0\n"); break; case 'D': *max_degree = atoi(optarg); if(*max_degree <= 0) ERROR("-D value > 0\n"); break; case 'R': *low_length = atoi(optarg); if(*low_length <= 0) ERROR("-R value > 0\n"); break; case 'o': if(strlen(optarg) > MAX_FILENAME_LENGTH) ERROR("Output filename is long (%s). Please change MAX_FILENAME_LENGTH.\n", optarg); strcpy(outfname, optarg); break; case 's': *random_seed = atoi(optarg); if(*random_seed < 0) ERROR("-s value >= 0\n"); break; case 'n': *ncalcs = atoll(optarg); if(*ncalcs < 0) ERROR("-n value >= 0\n"); break; case 'w': *max_temp = atof(optarg); if(*max_temp <= 0) ERROR("-w value > 0\n"); break; case 'c': *min_temp = atof(optarg); if(*min_temp <= 0) ERROR("-c value > 0\n"); break; case 'g': *groups = atoi(optarg); if(*groups != 1 && *groups != 2 && *groups != 4) ERROR("-g value == 1 or 2 or 4\n"); break; case 'C': *cooling_cycle = atoi(optarg); if(*cooling_cycle <= 0) ERROR("-C value > 0\n"); break; case 'B': *enable_bfs = true; break; case 'd': *enable_detect_temp = true; break; case 'F': *fixed_temp = atof(optarg); if(*fixed_temp <= 0) ERROR("-F value > 0\n"); break; case 'Y': *enable_hill_climbing = true; break; case 'M': *enable_halfway = true; break; case 'h': default: print_help(argv[0]); } } } // The "edge" does not have NO_EDGE static int count_loop(const int lines, const int *edge) { int num = 0; for(int i=0;i<lines;i++) if(edge[i*2] == edge[i*2+1]) num++; return num; } static bool confirm_dist(const int v, const int w, const int height, const int low_length) { return (DISTANCE(v, w, height) <= low_length); } static void simple_exchange_edge(const int height, const int low_length, const int lines, int* edge) { while(1){ int e1, e2, new_e1_v, new_e1_w, new_e2_v, new_e2_w; do{ e1 = getRandom(lines); e2 = getRandom(lines); } while( e1 == e2 ); int e1_v = edge[e1*2]; int e1_w = edge[e1*2+1]; int e2_v = edge[e2*2]; int e2_w = edge[e2*2+1]; if(confirm_dist(e1_v, e2_v, height, low_length) && confirm_dist(e1_w, e2_w, height, low_length)){ new_e1_v = e1_v; new_e1_w = e2_v; new_e2_v = e1_w; new_e2_w = e2_w; } else if(confirm_dist(e1_v, e2_w, height, low_length) && confirm_dist(e1_w, e2_v, height, low_length)){ new_e1_v = e1_v; new_e1_w = e2_w; new_e2_v = e1_w; new_e2_w = e2_v; } else{ continue; } edge[2*e1] = new_e1_v; edge[2*e1+1] = new_e1_w; edge[2*e2] = new_e2_v; edge[2*e2+1] = new_e2_w; break; } } #ifdef _OPENMP static int top_down_step(const int 
nodes, const int num_frontier, const int max_degree, const int* degree, const int* restrict adjacency, int* restrict frontier, int* restrict next, char* restrict bitmap) { int count = 0; int local_frontier[nodes]; #pragma omp parallel private(local_frontier) { int local_count = 0; #pragma omp for nowait for(int i=0;i<num_frontier;i++){ int v = frontier[i]; for(int j=0;j<degree[v];j++){ int n = *(adjacency + v * max_degree + j); // adjacency[v][j]; if(bitmap[n] == NOT_VISITED){ bitmap[n] = VISITED; local_frontier[local_count++] = n; } } } // end for i #pragma omp critical { memcpy(&next[count], local_frontier, local_count*sizeof(int)); count += local_count; } } return count; } #else static int top_down_step(const int nodes, const int num_frontier, const int max_degree, const int *degree, const int* restrict adjacency, int* restrict frontier, int* restrict next, char* restrict bitmap) { int count = 0; for(int i=0;i<num_frontier;i++){ int v = frontier[i]; for(int j=0;j<degree[v];j++){ int n = *(adjacency + v * max_degree + j); // int n = adjacency[v][j]; if(bitmap[n] == NOT_VISITED){ bitmap[n] = VISITED; next[count++] = n; } } } return count; } #endif static int simple_bfs(const int nodes, const int max_degree, const int *degree, int *adjacency) { char *bitmap = malloc(sizeof(char) * nodes); int *frontier = malloc(sizeof(int) * nodes); int *next = malloc(sizeof(int) * nodes); int num_frontier = 1, root = 0, num = 0; for(int i=0;i<nodes;i++) bitmap[i] = NOT_VISITED; frontier[0] = root; bitmap[root] = VISITED; while(1){ num_frontier = top_down_step(nodes, num_frontier, max_degree, degree, adjacency, frontier, next, bitmap); if(num_frontier == 0) break; int *tmp = frontier; frontier = next; next = tmp; } for(int i=0;i<nodes;i++) if(bitmap[i] == NOT_VISITED) num++; free(bitmap); free(frontier); free(next); return num; } // Inherited from http://research.nii.ac.jp/graphgolf/c/create-lattice.c static void create_lattice(const int nodes, const int lines, const int width, const int height, const int max_degree, int *degree, const int low_length, int edge[lines*2]) { int i = 0; for(int x=0;x<width/2;x++){ for(int y=0;y<height;y++){ for(int k=0;k<max_degree;k++){ edge[i*2] = y + 2 * x * height; edge[i*2+1] = edge[2*i] + height; i++; } } } if(width%2 == 1){ for(int y=0;y<height/2;y++){ for(int k=0;k<max_degree;k++){ edge[i*2] = (width - 1) * height + 2 * y; edge[i*2+1] = edge[i*2] + 1; i++; } } /* add self-loop */ if(height%2 == 1){ for(int k=0;k<max_degree/2;k++){ edge[i*2] = edge[i*2+1] = nodes - 1; i++; } } } for(int i=0;i<lines;i++) // Give randomness simple_exchange_edge(height, low_length, lines, edge); // Remove loops int *tmp_edge = malloc(lines*2*sizeof(int)); int min_num = count_loop(lines, edge); while(1){ memcpy(tmp_edge, edge, sizeof(int)*lines*2); simple_exchange_edge(height, low_length, lines, tmp_edge); int tmp_num = count_loop(lines, tmp_edge); if(tmp_num == 0){ memcpy(edge, tmp_edge, sizeof(int)*lines*2); break; } else{ if(tmp_num <= min_num){ min_num = tmp_num; memcpy(edge, tmp_edge, sizeof(int)*lines*2); } } } // Make an unconnected graph a connected graph // Note that the connected graph after this operation may have loops. 
int (*adjacency)[max_degree] = malloc(sizeof(int)*nodes*max_degree); // int adjacency[nodes][max_degree]; create_adjacency(nodes, lines, max_degree, degree, (const int (*)[2])edge, adjacency); min_num = simple_bfs(nodes, max_degree, degree, (int *)adjacency); while(1){ memcpy(tmp_edge, edge, sizeof(int)*lines*2); simple_exchange_edge(height, low_length, lines, tmp_edge); create_adjacency(nodes, lines, max_degree, degree, (const int (*)[2])tmp_edge, adjacency); int tmp_num = simple_bfs(nodes, max_degree, degree, (int *)adjacency); if(tmp_num == 0){ memcpy(edge, tmp_edge, sizeof(int)*lines*2); break; } else{ if(tmp_num <= min_num){ min_num = tmp_num; memcpy(edge, tmp_edge, sizeof(int)*lines*2); } } } // Remove loops again if(count_loop(lines, edge) != 0){ while(1){ memcpy(tmp_edge, edge, sizeof(int)*lines*2); simple_exchange_edge(height, low_length, lines, tmp_edge); int tmp_num = count_loop(lines, tmp_edge); if(tmp_num == 0){ memcpy(edge, tmp_edge, sizeof(int)*lines*2); break; } else{ if(tmp_num <= min_num){ min_num = tmp_num; memcpy(edge, tmp_edge, sizeof(int)*lines*2); } } } } free(tmp_edge); free(adjacency); // for(int i=0;i<lines;i++) // printf("%d,%d %d,%d\n", WIDTH(edge[i*2], height), HEIGHT(edge[i*2], height), // WIDTH(edge[i*2+1], height), HEIGHT(edge[i*2+1], height)); //EXIT(0); } static int count_lines(const char *fname) { FILE *fp = NULL; if((fp = fopen(fname, "r")) == NULL) ERROR("File not found\n"); int lines = 0, c; while((c = fgetc(fp)) != EOF) if(c == '\n') lines++; fclose(fp); return lines; } static void read_file_lattice(int *edge, int *w, int *h, const char *fname) { FILE *fp; if((fp = fopen(fname, "r")) == NULL){ PRINT_R0("File not found\n"); EXIT(1); } int n[4]; *w = 0; *h = 0; while(fscanf(fp, "%d,%d %d,%d", &n[0], &n[1], &n[2], &n[3]) != EOF){ *w = MAX(*w, n[0]); *h = MAX(*h, n[1]); *w = MAX(*w, n[2]); *h = MAX(*h, n[3]); } *w += 1; *h += 1; rewind(fp); int i = 0; while(fscanf(fp, "%d,%d %d,%d", &n[0], &n[1], &n[2], &n[3]) != EOF){ edge[i*2 ] = n[0] * (*h) + n[1]; edge[i*2+1] = n[2] * (*h) + n[3]; i++; } fclose(fp); } static int max_node_num(const int lines, const int edge[lines*2]) { int max = edge[0]; for(int i=1;i<lines*2;i++) max = MAX(max, edge[i]); return max; } static void verfy_graph(const int nodes, const int lines, const int edge[lines*2], const int height, const int low_length, const int max_degree) { PRINT_R0("Verifying a regular graph... 
"); for(int i=0;i<lines;i++){ if(edge[i*2] != NO_EDGE) if(DISTANCE(edge[i*2], edge[i*2+1], height) > low_length) ERROR("Over length in line %d: length = %d, distance = %d\n", i+1, low_length, DISTANCE(edge[i*2], edge[i*2+1], height)); } int degree[nodes]; for(int i=0;i<nodes;i++) degree[i] = 0; for(int i=0;i<lines;i++){ int n1 = edge[i*2 ]; int n2 = edge[i*2+1]; if(n1 != NO_EDGE){ degree[n1]++; if(degree[n1] > max_degree) ERROR("Degree is over %d\n", degree[n1]); degree[n2]++; if(degree[n2] > max_degree) ERROR("Degree is over %d\n", degree[n2]); } } PRINT_R0("OK\n"); } static void create_symmetric_edge(int *edge, const int based_nodes, const int based_lines, const int groups, const int max_degree, int *degree, const int nodes, const int lines, const int height, const int width, const int based_height, const int low_length) { for(int i=0;i<based_lines;i++) for(int j=0;j<2;j++) edge[i*2+j] = WIDTH(edge[i*2+j], based_height) * height + HEIGHT(edge[i*2+j], based_height); if(groups == 2){ for(int i=0;i<based_lines;i++) for(int j=0;j<2;j++) edge[(based_lines+i)*2+j] = ROTATE(edge[i*2+j], height, width, groups, 180); } else if(groups == 4){ for(int i=0;i<based_lines;i++){ for(int j=0;j<2;j++){ edge[(based_lines +i)*2+j] = ROTATE(edge[i*2+j], height, width, groups, 90); edge[(based_lines*2+i)*2+j] = ROTATE(edge[i*2+j], height, width, groups, 180); edge[(based_lines*3+i)*2+j] = ROTATE(edge[i*2+j], height, width, groups, 270); } } } int *tmp_edge = malloc(lines*2*sizeof(int)); int *tmp_degree = malloc(nodes*sizeof(int)); int (*adjacency)[max_degree] = malloc(sizeof(int)*nodes*max_degree); // int adjacency[nodes][max_degree]; create_adjacency(nodes, lines, max_degree, degree, (const int (*)[2])edge, adjacency); int min_num = simple_bfs(nodes, max_degree, degree, (int *)adjacency); while(1){ memcpy(tmp_edge, edge, sizeof(int)*lines*2); memcpy(tmp_degree, degree, sizeof(int)*nodes); exchange_edge(nodes, lines, max_degree, tmp_degree, (int (*)[2])tmp_edge, height, width, groups, low_length, 0); create_adjacency(nodes, lines, max_degree, tmp_degree, (const int (*)[2])tmp_edge, adjacency); int tmp_num = simple_bfs(nodes, max_degree, tmp_degree, (int *)adjacency); if(tmp_num == 0){ memcpy(edge, tmp_edge, sizeof(int)*lines*2); break; } else{ if(tmp_num <= min_num){ min_num = tmp_num; memcpy(edge, tmp_edge, sizeof(int)*lines*2); memcpy(degree, tmp_degree, sizeof(int)*nodes); } } } free(tmp_edge); free(tmp_degree); free(adjacency); } static int dist(const int x1, const int y1, const int x2, const int y2) { return(abs(x1 - x2) + abs(y1 - y2)); } static void lower_bound_of_diam_aspl(int *low_diam, double *low_ASPL, const int m, const int n, const int max_degree, const int length) { int moore[m*n], hist[m*n], mh[m*n]; int mn = m * n, current = max_degree, ii; double sum = 0; moore[0] = 1; moore[1] = max_degree + 1; for(ii=2;;ii++){ current = current * (max_degree - 1); moore[ii] = moore[ii-1] + current; if(moore[ii] >= mn){ moore[ii] = mn; break; } } int maxhop = MAX((m+n-2+(length-1))/length, ii); for(int i=ii+1;i<=maxhop;i++) moore[i] = mn; for(int i=0;i<m;i++){ for(int j=0;j<n;j++){ for(int k=0;k<=maxhop;k++) hist[k] = 0; for (int i2=0;i2<m;i2++) for(int j2=0;j2<n;j2++) hist[(dist(i,j,i2,j2)+length-1)/length]++; for(int k=1;k<=maxhop;k++) hist[k] += hist[k-1]; for(int k=0;k<=maxhop;k++) mh[k] = MIN(hist[k], moore[k]); for(int k=1;k<=maxhop;k++) sum += (double)(mh[k] - mh[k-1]) * k; } } int dboth = 0; for(dboth=0;;dboth++) if(mh[dboth] == mn) break; *low_diam = dboth; *low_ASPL = sum/((double)mn*(mn-1)); } 
static void output_params(const int max_degree, const int groups, const int low_length, const int random_seed, const double max_temp, const double min_temp, const long long ncalcs, const int cooling_cycle, const double cooling_rate, const char *infname, const char *outfname, const double average_time, const bool enable_hill_climbing, const int width, const int height, const bool enable_bfs, const bool enable_fixed_temp, const double fixed_temp) { #ifdef NDEBUG PRINT_R0("NO DEBUG MODE\n"); #else PRINT_R0("DEBUG MODE\n"); #endif PRINT_R0("Seed : %d\n", random_seed); PRINT_R0("Processes: %d\n", procs); #ifdef _OPENMP PRINT_R0("Threads : %d\n", omp_get_max_threads()); #endif if(enable_bfs) PRINT_R0("APSP : BFS\n"); else PRINT_R0("APSP : MATRIX Operation\n"); if(enable_hill_climbing) PRINT_R0("Algorithm: Hill climbing method\n"); else{ if(enable_fixed_temp) PRINT_R0("Algorithm: Fixed Temperature Simulated Annealing : %f\n", fixed_temp); else PRINT_R0("Algorithm: Simulated Annealing\n"); PRINT_R0(" MAX Temperature: %f\n", max_temp); PRINT_R0(" MIN Temperature: %f\n", min_temp); PRINT_R0(" Cooling Cycle: %d\n", cooling_cycle); PRINT_R0(" Cooling Rate : %f\n", cooling_rate); } if(groups != 1) PRINT_R0(" Groups : %d\n", groups); PRINT_R0("Num. of Calculations: %lld\n", ncalcs); PRINT_R0(" Average APSP time : %f sec.\n", average_time); PRINT_R0(" Estimated elapsed time: %f sec.\n", average_time * ncalcs); if(infname[0] != NOT_C_DEFINED) PRINT_R0("Input filename: %s\n", infname); PRINT_R0(" (w x h, d, r) = (%d x %d, %d, %d)\n", width, height, max_degree, low_length); if(outfname[0] != NOT_C_DEFINED) PRINT_R0("Output filename: %s\n", outfname); PRINT_R0("---\n"); } static void output_file(FILE *fp, const int lines, const int height, const int edge[lines*2]) { for(int i=0;i<lines;i++) if(edge[i*2] != NO_EDGE) fprintf(fp, "%d,%d %d,%d\n", WIDTH(edge[i*2], height), HEIGHT(edge[i*2], height), WIDTH(edge[i*2+1], height), HEIGHT(edge[i*2+1], height)); } int main(int argc, char *argv[]) { bool enable_hill_climbing = false, enable_detect_temp = false, enable_bfs = false, enable_halfway = false; char hostname[MPI_MAX_PROCESSOR_NAME]; char infname[MAX_FILENAME_LENGTH] = {NOT_C_DEFINED}, outfname[MAX_FILENAME_LENGTH] = {NOT_C_DEFINED}; int random_seed = 0, cooling_cycle = 1, groups = 1; int namelen, based_lines, lines, based_width, based_height, based_nodes, nodes; int diam = NOT_N_DEFINED, max_degree = NOT_N_DEFINED, low_diam = NOT_N_DEFINED; int width = NOT_N_DEFINED, height = NOT_N_DEFINED, low_length = NOT_N_DEFINED; long long ncalcs = DEFAULT_NCALCS, num_accepts = 0; double ASPL = NOT_N_DEFINED, low_ASPL = NOT_N_DEFINED, cooling_rate = NOT_N_DEFINED, max_diff_energy = NOT_N_DEFINED; double max_temp = NOT_N_DEFINED, min_temp = NOT_N_DEFINED, fixed_temp = NOT_N_DEFINED; int *edge = NULL, *degree = NULL; FILE *fp = NULL; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &procs); MPI_Get_processor_name(hostname, &namelen); PRINT_R0("Run on %s\n", hostname); time_t t = time(NULL); PRINT_R0("%s---\n", ctime(&t)); // Set arguments set_args(argc, argv, infname, &low_length, outfname, &random_seed, &ncalcs, &max_temp, &min_temp, &groups, &cooling_cycle, &enable_hill_climbing, &enable_detect_temp, &enable_bfs, &enable_halfway, &fixed_temp, &width, &height, &max_degree); // Set other arguments bool enable_max_temp = (max_temp != NOT_N_DEFINED); bool enable_min_temp = (min_temp != NOT_N_DEFINED); bool enable_fixed_temp = (fixed_temp != NOT_N_DEFINED); bool enable_infname = 
(infname[0] != NOT_C_DEFINED); bool enable_outfname = (outfname[0] != NOT_C_DEFINED); bool enable_whd = (width != NOT_N_DEFINED && height != NOT_N_DEFINED && max_degree != NOT_N_DEFINED); // Check arguments if(low_length == NOT_N_DEFINED) ERROR("Must set -R\n"); else if(enable_hill_climbing && enable_max_temp) ERROR("Both -Y and -w cannot be used.\n"); else if(enable_hill_climbing && enable_min_temp) ERROR("Both -Y and -c cannot be used.\n"); else if(enable_hill_climbing && enable_detect_temp) ERROR("Both -Y and -d cannot be used.\n"); else if(!enable_infname && !enable_whd) ERROR("Must set -f or \"-W and -H and -D\"\n"); else if(enable_halfway && !enable_infname) ERROR("Must set both -M and -f\n"); if(!enable_max_temp) max_temp = 100.0; if(!enable_min_temp) min_temp = 0.217147; if(max_temp == min_temp) ERROR("The same values in -w and -c.\n"); if(enable_detect_temp) ncalcs = DEFAULT_DETECT_NCALS; srandom(random_seed); if(enable_infname){ ERROR("Not implemented yet\n"); based_lines = count_lines(infname); lines = (enable_halfway)? based_lines : based_lines * groups; edge = malloc(sizeof(int)*lines*2); // int edge[lines][2]; read_file_lattice(edge, &based_width, &based_height, infname); based_nodes = max_node_num(based_lines, (int *)edge) + 1; if(enable_halfway){ based_nodes /= groups; based_lines /= groups; if(groups == 2){ based_height /= 2; } else if(groups == 4){ based_width /= 2; based_height /= 2; } } if(groups == 1){ height = based_height; width = based_width; } else if(groups == 2){ height = based_height * 2; width = based_width; } else{ // groups == 4 height = based_height * 2; width = based_width * 2; } nodes = based_nodes * groups; max_degree = 2 * lines / nodes; } else{ nodes = width * height; based_nodes = nodes / groups; lines = nodes * max_degree / 2; based_lines = lines / groups; edge = malloc(sizeof(int)*lines*2); // int edge[lines][2]; degree = malloc(sizeof(int)*nodes); // int degree[nodes]; if(groups == 1){ based_width = width; based_height = height; } else if(groups == 2){ based_width = width; based_height = height/2; } else{ // groups == 4 based_width = width/2; based_height = height/2; } } if(groups == 4 && (based_width != based_height)) ERROR("When g = 4, width(%d) must be equal to height(%d).\n", based_width, based_height); else if(groups == 4 && width%2 != 0 && height%2 != 0) ERROR("When g = 4, width(%d) and height(%d) must be divisible by 2.\n", width, height); else if(groups == 2 && height%2 != 0) ERROR("When g = 2, height(%d) must be divisible by 2.\n", height); else if(nodes%groups != 0) ERROR("nodes(%d) must be divisible by groups(%d)\n", nodes, groups); else if(lines%groups != 0) ERROR("(nodes*max_degree/2) must be divisible by groups(%d)\n", groups); else if(based_width*based_height != based_nodes) ERROR("Not grid graph (width %d x height %d != nodes %d).\n", based_width, based_height, based_nodes); if(!enable_infname) create_lattice(based_nodes, based_lines, based_width, based_height, max_degree, degree, low_length, edge); int *rotate_hash = malloc(nodes * sizeof(int)); create_rotate_hash(nodes, height, width, groups, rotate_hash); if(!enable_halfway && groups != 1) create_symmetric_edge(edge, based_nodes, based_lines, groups, max_degree, degree, nodes, lines, height, width, based_height, low_length); verfy_graph(nodes, lines, edge, height, low_length, max_degree); lower_bound_of_diam_aspl(&low_diam, &low_ASPL, width, height, max_degree, low_length); check_current_edge(nodes, lines, max_degree, degree, edge, low_ASPL, low_diam, groups, height, based_height, 
enable_bfs, rotate_hash); double average_time = estimated_elapse_time(nodes, lines, max_degree, degree, edge, height, width, based_height, groups, low_length, enable_bfs, rotate_hash); if(enable_hill_climbing){ fixed_temp = max_temp = min_temp = 0.0; cooling_rate = 1.0; } else{ cooling_rate = pow(min_temp/max_temp, (double)cooling_cycle/ncalcs); } if(enable_outfname && rank == 0){ struct stat stat_buf; if(stat(outfname, &stat_buf) == 0) ERROR("Output file %s exists.\n", outfname); if((fp = fopen(outfname, "w")) == NULL) ERROR("Cannot open %s\n", outfname); } output_params(max_degree, groups, low_length, random_seed, max_temp, min_temp, ncalcs, cooling_cycle, cooling_rate, infname, outfname, average_time, enable_hill_climbing, width, height, enable_bfs, enable_fixed_temp, fixed_temp); // Optimization timer_clear_all(); timer_start(TIMER_SA); long long step = sa(nodes, lines, max_degree, degree, based_nodes, ncalcs, cooling_rate, low_diam, low_ASPL, enable_bfs, enable_hill_climbing, enable_detect_temp, &max_diff_energy, max_temp, min_temp, fixed_temp, edge, &diam, &ASPL, cooling_cycle, &num_accepts, width, based_width, height, based_height, low_length, groups, rotate_hash, enable_fixed_temp); timer_stop(TIMER_SA); if(enable_detect_temp){ // Set max temperature so that the maximum diff energy is accepted with 50% probability. PRINT_R0("Proposed max temperature is %f\n", (-1.0 * max_diff_energy) / log(0.5)); // Set min temperature so that the minimum diff energy (2.0) is accepted with 0.01% probability. END("Proposed min temperature is %f\n", (-2.0) / log(0.0001)); } // Output results PRINT_R0("---\n"); PRINT_R0("Diam. k = %d ASPL l = %f Diam. gap = %d ASPL gap = %f\n", diam, ASPL, diam-low_diam, ASPL-low_ASPL); double time_sa = timer_read(TIMER_SA); double time_apsp = timer_read(TIMER_APSP); double time_check = timer_read(TIMER_CHECK); PRINT_R0("Steps: %lld Elapsed time: %f sec. (APSP: %f sec. Check: %f sec. Other: %f sec.)\n", step, time_sa, time_apsp, time_check, time_sa-(time_apsp+time_check)); if(ncalcs > SKIP_ACCEPTS) PRINT_R0("Accept rate: %f (= %lld/%lld)\n", (double)num_accepts/(ncalcs-SKIP_ACCEPTS), num_accepts, ncalcs-SKIP_ACCEPTS); if(rank == 0 && enable_outfname){ output_file(fp, lines, height, edge); fclose(fp); } verfy_graph(nodes, lines, edge, height, low_length, max_degree); MPI_Finalize(); free(edge); free(degree); free(rotate_hash); return 0; }
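Two pieces of arithmetic in main() above are worth spelling out: the cooling rate is chosen so that a geometric schedule starting at max_temp reaches min_temp after ncalcs steps, and the proposed temperatures printed under -d invert the Metropolis acceptance rule P(accept) = exp(-dE/T). A standalone sketch of both rules (the actual annealing loop lives in sa(), defined outside this file):

#include <cmath>
#include <cstdlib>

// Geometric cooling: T <- T * rate every `cycle` steps, so that T falls
// from t_max to t_min over ncalcs steps in total.
double cooling_rate(double t_max, double t_min, long long ncalcs, int cycle)
{
    return std::pow(t_min / t_max, (double)cycle / (double)ncalcs);
}

// Metropolis rule: always accept improvements; accept a worsening of
// diff_energy with probability exp(-diff_energy / temp).
bool metropolis_accept(double diff_energy, double temp)
{
    if (diff_energy <= 0.0) return true;
    return (double)std::rand() / RAND_MAX < std::exp(-diff_energy / temp);
}

// Temperature at which a move of energy dE is accepted with probability p;
// with p = 0.5 and dE = max_diff_energy this is the "Proposed max
// temperature" printed above.
double temp_for_acceptance(double dE, double p)
{
    return -dE / std::log(p);
}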
GB_unop__signum_fc32_fc32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__signum_fc32_fc32) // op(A') function: GB (_unop_tran__signum_fc32_fc32) // C type: GxB_FC32_t // A type: GxB_FC32_t // cast: GxB_FC32_t cij = aij // unaryop: cij = GB_csignumf (aij) #define GB_ATYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_csignumf (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC32_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC32_t z = aij ; \ Cx [pC] = GB_csignumf (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_SIGNUM || GxB_NO_FC32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__signum_fc32_fc32) ( GxB_FC32_t *Cx, // Cx and Ax may be aliased const GxB_FC32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = GB_csignumf (z) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = GB_csignumf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__signum_fc32_fc32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
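GB_csignumf, the kernel of the operator above, is defined in GraphBLAS' internal math headers rather than in this generated file. For a complex input it is conventionally z/|z| for nonzero z and 0 otherwise, i.e. a unit-magnitude value carrying only the phase of z; a standalone sketch under that assumption (not the authoritative GraphBLAS definition):

#include <complex>

// Complex signum: sign(z) = z / |z|, with sign(0) = 0. The result has
// magnitude 1 and the same phase as z.
std::complex<float> csignumf(std::complex<float> z)
{
    float m = std::abs(z);
    return (m == 0.0f) ? std::complex<float>(0.0f, 0.0f) : z / m;
}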
SwathFileConsumer.h
// -------------------------------------------------------------------------- // OpenMS -- Open-Source Mass Spectrometry // -------------------------------------------------------------------------- // Copyright The OpenMS Team -- Eberhard Karls University Tuebingen, // ETH Zurich, and Freie Universitaet Berlin 2002-2015. // // This software is released under a three-clause BSD license: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of any author or any participating institution // may be used to endorse or promote products derived from this software // without specific prior written permission. // For a full list of authors, refer to the file AUTHORS. // -------------------------------------------------------------------------- // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING // INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // -------------------------------------------------------------------------- // $Maintainer: Hannes Roest $ // $Authors: Hannes Roest $ // -------------------------------------------------------------------------- #ifndef OPENMS_FORMAT_DATAACCESS_SWATHFILECONSUMER_H #define OPENMS_FORMAT_DATAACCESS_SWATHFILECONSUMER_H #include <boost/cast.hpp> // Datastructures #include <OpenMS/ANALYSIS/OPENSWATH/OPENSWATHALGO/DATAACCESS/DataStructures.h> #include <OpenMS/ANALYSIS/OPENSWATH/OPENSWATHALGO/DATAACCESS/SwathMap.h> // Consumers #include <OpenMS/FORMAT/DATAACCESS/MSDataCachedConsumer.h> #include <OpenMS/FORMAT/DATAACCESS/MSDataTransformingConsumer.h> // Helpers #include <OpenMS/ANALYSIS/OPENSWATH/OpenSwathHelper.h> #include <OpenMS/ANALYSIS/OPENSWATH/DATAACCESS/SimpleOpenMSSpectraAccessFactory.h> #include <OpenMS/INTERFACES/IMSDataConsumer.h> #include <OpenMS/FORMAT/CachedMzML.h> #ifdef _OPENMP #include <omp.h> #endif namespace OpenMS { /** * @brief Abstract base class which can consume spectra coming from SWATH experiment stored in a single file. * * The class consumes spectra which are coming from a complete SWATH * experiment. It will group MS2 spectra by their precursor m/z, assuming * that they correspond to the same SWATH window. For example, the spectra * could be arranged in the following fashion: * * - MS1 Spectrum (no precursor) * - MS2 Spectrum (precursor = [400,425]) * - MS2 Spectrum (precursor = [425,450]) * - [...] * - MS2 Spectrum (precursor = [1175,1200]) * - MS1 Spectrum (no precursor) * - MS2 Spectrum (precursor = [400,425]) * - MS2 Spectrum (precursor = [425,450]) * - [...] 
* * Derived classes are expected to implement functions consuming a spectrum coming * from a specific SWATH or an MS1 spectrum and a final function * ensureMapsAreFilled_ after which the swath_maps_ vector needs to contain * valid pointers to MSExperiment. * * In addition, it is possible to provide the swath boundaries and the read in * spectra will be matched by their precursor m/z to the "center" attribute * of the provided Swath maps. * * Usage: * * @code * FullSwathFileConsumer * dataConsumer; * // assign dataConsumer to an implementation of FullSwathFileConsumer * MzMLFile().transform(file, dataConsumer); * dataConsumer->retrieveSwathMaps(maps); * @endcode * */ class OPENMS_DLLAPI FullSwathFileConsumer : public Interfaces::IMSDataConsumer<> { public: typedef MSExperiment<> MapType; typedef MapType::SpectrumType SpectrumType; typedef MapType::ChromatogramType ChromatogramType; FullSwathFileConsumer() : ms1_map_(), // initialize to null consuming_possible_(true), use_external_boundaries_(false), correct_window_counter_(0) { use_external_boundaries_ = !swath_map_boundaries_.empty(); } /** * @brief Constructor * * @param swath_boundaries A vector of SwathMaps of which only the center, * lower and upper attributes will be used to infer the expected Swath maps. * */ FullSwathFileConsumer(std::vector<OpenSwath::SwathMap> swath_boundaries) : swath_map_boundaries_(swath_boundaries), ms1_map_(), // initialize to null consuming_possible_(true), use_external_boundaries_(false), correct_window_counter_(0) { use_external_boundaries_ = !swath_map_boundaries_.empty(); } ~FullSwathFileConsumer() {} void setExpectedSize(Size, Size) {} void setExperimentalSettings(const ExperimentalSettings& exp) {settings_ = exp; } /** * @brief Populate the vector of swath maps after consuming all spectra. * * Will populate the input vector with SwathMap objects which correspond to * the MS1 map (if present) and the MS2 maps (SWATH maps). This should be * called after all spectra are consumed. * * @note It is not possible to consume any more spectra after calling this * function (it contains finalization code and may close file streams). * */ void retrieveSwathMaps(std::vector<OpenSwath::SwathMap>& maps) { consuming_possible_ = false; // make consumption of further spectra / chromatograms impossible ensureMapsAreFilled_(); if (ms1_map_) { OpenSwath::SwathMap map; map.sptr = SimpleOpenMSSpectraFactory::getSpectrumAccessOpenMSPtr(ms1_map_); map.lower = -1; map.upper = -1; map.center = -1; map.ms1 = true; maps.push_back(map); } // Print warning if the lower/upper window could not be determined and we // required manual determination of the boundaries. if (!use_external_boundaries_ && correct_window_counter_ != swath_maps_.size()) { std::cout << "WARNING: Could not correctly read the upper/lower limits of the SWATH windows from your input file. Read " << correct_window_counter_ << " correct (non-zero) window limits (expected " << swath_maps_.size() << " windows)." 
<< std::endl; } size_t nonempty_maps = 0; for (Size i = 0; i < swath_maps_.size(); i++) { OpenSwath::SwathMap map; map.sptr = SimpleOpenMSSpectraFactory::getSpectrumAccessOpenMSPtr(swath_maps_[i]); map.lower = swath_map_boundaries_[i].lower; map.upper = swath_map_boundaries_[i].upper; map.center = swath_map_boundaries_[i].center; map.ms1 = false; maps.push_back(map); if (map.sptr->getNrSpectra() > 0) {nonempty_maps++;} } if (nonempty_maps != swath_map_boundaries_.size()) { std::cout << "WARNING: The number of nonempty maps found in the input file (" << nonempty_maps << ") is not equal to the number of provided swath window boundaries (" << swath_map_boundaries_.size() << "). Please check your input." << std::endl; } } /// Consume a chromatogram -> should not happen when dealing with SWATH maps void consumeChromatogram(MapType::ChromatogramType&) { std::cerr << "Read chromatogram while reading SWATH files, did not expect that!" << std::endl; } /** * @brief * Consume a spectrum which may belong either to an MS1 scan or * one of n MS2 (SWATH) scans * */ void consumeSpectrum(MapType::SpectrumType& s) { if (!consuming_possible_) { throw Exception::IllegalArgument(__FILE__, __LINE__, __PRETTY_FUNCTION__, "FullSwathFileConsumer cannot consume any more spectra after retrieveSwathMaps has been called already"); } if (s.getMSLevel() == 1) { consumeMS1Spectrum_(s); } else { if (s.getPrecursors().empty()) { throw Exception::InvalidParameter(__FILE__, __LINE__, __PRETTY_FUNCTION__, "Swath scan does not provide a precursor."); } const std::vector<Precursor> prec = s.getPrecursors(); double center = prec[0].getMZ(); double lower = prec[0].getMZ() - prec[0].getIsolationWindowLowerOffset(); double upper = prec[0].getMZ() + prec[0].getIsolationWindowUpperOffset(); bool found = false; // Check if enough information is present to infer the swath if (center <= 0.0) { throw Exception::InvalidParameter(__FILE__, __LINE__, __PRETTY_FUNCTION__, "Swath scan does not provide any precursor isolation information."); } // try to match the current scan to one of the already known windows for (Size i = 0; i < swath_map_boundaries_.size(); i++) { // We group by the precursor mz (center of the window) since this // should be present in all SWATH scans. if (std::fabs(center - swath_map_boundaries_[i].center) < 1e-6) { found = true; consumeSwathSpectrum_(s, i); } } if (!found) { if (use_external_boundaries_) { throw Exception::InvalidParameter(__FILE__, __LINE__, __PRETTY_FUNCTION__, String("Encountered SWATH scan with boundary ") + center + " m/z which was not present in the provided windows."); } else { consumeSwathSpectrum_(s, swath_map_boundaries_.size()); // we found a new SWATH window if (lower > 0.0 && upper > 0.0) {correct_window_counter_++;} OpenSwath::SwathMap boundary; boundary.lower = lower; boundary.upper = upper; boundary.center = center; swath_map_boundaries_.push_back(boundary); LOG_DEBUG << "Adding Swath centered at " << center << " m/z with an isolation window of " << lower << " to " << upper << " m/z." << std::endl; } } } } protected: /** * @brief Consume an MS2 spectrum belonging to SWATH "swath_nr" * * This function should handle a spectrum belonging to a specific SWATH * (indicated by swath_nr). * */ virtual void consumeSwathSpectrum_(MapType::SpectrumType& s, size_t swath_nr) = 0; /** * @brief Consume an MS1 spectrum * * This function should handle an MS1 spectrum. 
* */ virtual void consumeMS1Spectrum_(MapType::SpectrumType& s) = 0; /** * @brief Callback function after the reading is complete * * Has to ensure that swath_maps_ and ms1_map_ are correctly populated. */ virtual void ensureMapsAreFilled_() = 0; /// A list of Swath map identifiers (lower/upper boundary and center) std::vector<OpenSwath::SwathMap> swath_map_boundaries_; /// A list of SWATH maps and the MS1 map std::vector<boost::shared_ptr<MSExperiment<> > > swath_maps_; boost::shared_ptr<MSExperiment<> > ms1_map_; /// The Experimental settings // (MSExperiment has no constructor using ExperimentalSettings) MSExperiment<> settings_; /// Whether further spectra can still be consumed bool consuming_possible_; /// Whether to use external input for SWATH boundaries bool use_external_boundaries_; /// How many windows were correctly annotated (non-zero window limits) size_t correct_window_counter_; }; /** * @brief In-memory implementation of FullSwathFileConsumer * * Keeps all the spectra in memory by just appending them to an MSExperiment. * */ class OPENMS_DLLAPI RegularSwathFileConsumer : public FullSwathFileConsumer { public: typedef MSExperiment<> MapType; typedef MapType::SpectrumType SpectrumType; typedef MapType::ChromatogramType ChromatogramType; RegularSwathFileConsumer() {} RegularSwathFileConsumer(std::vector<OpenSwath::SwathMap> known_window_boundaries) : FullSwathFileConsumer(known_window_boundaries) {} protected: void addNewSwathMap_() { boost::shared_ptr<MSExperiment<Peak1D> > exp(new MSExperiment<Peak1D>(settings_)); swath_maps_.push_back(exp); } void consumeSwathSpectrum_(MapType::SpectrumType& s, size_t swath_nr) { while (swath_maps_.size() <= swath_nr) { addNewSwathMap_(); } swath_maps_[swath_nr]->addSpectrum(s); } void addMS1Map_() { boost::shared_ptr<MSExperiment<Peak1D> > exp(new MSExperiment<Peak1D>(settings_)); ms1_map_ = exp; } void consumeMS1Spectrum_(MapType::SpectrumType& s) { if (!ms1_map_) { addMS1Map_(); } ms1_map_->addSpectrum(s); } void ensureMapsAreFilled_() {} }; /** * @brief On-disk cached implementation of FullSwathFileConsumer * * Writes all spectra immediately to disk in a user-specified caching * location using the MSDataCachedConsumer. Internally, it handles * n+1 (n SWATH + 1 MS1 map) objects of MSDataCachedConsumer which can consume the * spectra and write them to disk immediately. 
* */ class OPENMS_DLLAPI CachedSwathFileConsumer : public FullSwathFileConsumer { public: typedef MSExperiment<> MapType; typedef MapType::SpectrumType SpectrumType; typedef MapType::ChromatogramType ChromatogramType; CachedSwathFileConsumer(String cachedir, String basename, Size nr_ms1_spectra, std::vector<int> nr_ms2_spectra) : ms1_consumer_(NULL), swath_consumers_(), cachedir_(cachedir), basename_(basename), nr_ms1_spectra_(nr_ms1_spectra), nr_ms2_spectra_(nr_ms2_spectra) {} CachedSwathFileConsumer(std::vector<OpenSwath::SwathMap> known_window_boundaries, String cachedir, String basename, Size nr_ms1_spectra, std::vector<int> nr_ms2_spectra) : FullSwathFileConsumer(known_window_boundaries), ms1_consumer_(NULL), swath_consumers_(), cachedir_(cachedir), basename_(basename), nr_ms1_spectra_(nr_ms1_spectra), nr_ms2_spectra_(nr_ms2_spectra) {} ~CachedSwathFileConsumer() { // Properly delete the MSDataCachedConsumer -> free memory and _close_ file stream while (!swath_consumers_.empty()) { delete swath_consumers_.back(); swath_consumers_.pop_back(); } if (ms1_consumer_ != NULL) { delete ms1_consumer_; ms1_consumer_ = NULL; } } protected: void addNewSwathMap_() { String meta_file = cachedir_ + basename_ + "_" + String(swath_consumers_.size()) + ".mzML"; String cached_file = meta_file + ".cached"; MSDataCachedConsumer* consumer = new MSDataCachedConsumer(cached_file, true); consumer->setExpectedSize(nr_ms2_spectra_[swath_consumers_.size()], 0); swath_consumers_.push_back(consumer); // maps for meta data boost::shared_ptr<MSExperiment<Peak1D> > exp(new MSExperiment<Peak1D>(settings_)); swath_maps_.push_back(exp); } void consumeSwathSpectrum_(MapType::SpectrumType& s, size_t swath_nr) { while (swath_maps_.size() <= swath_nr) { addNewSwathMap_(); } swath_consumers_[swath_nr]->consumeSpectrum(s); swath_maps_[swath_nr]->addSpectrum(s); // append for the metadata (actual data is deleted) } void addMS1Map_() { String meta_file = cachedir_ + basename_ + "_ms1.mzML"; String cached_file = meta_file + ".cached"; ms1_consumer_ = new MSDataCachedConsumer(cached_file, true); ms1_consumer_->setExpectedSize(nr_ms1_spectra_, 0); boost::shared_ptr<MSExperiment<Peak1D> > exp(new MSExperiment<Peak1D>(settings_)); ms1_map_ = exp; } void consumeMS1Spectrum_(MapType::SpectrumType& s) { if (ms1_consumer_ == NULL) { addMS1Map_(); } ms1_consumer_->consumeSpectrum(s); ms1_map_->addSpectrum(s); // append for the metadata (actual data is deleted) } void ensureMapsAreFilled_() { size_t swath_consumers_size = swath_consumers_.size(); bool have_ms1 = (ms1_consumer_ != NULL); // Properly delete the MSDataCachedConsumer -> free memory and _close_ file stream // The file streams to the cached data on disc can and should be closed // here safely. Since ensureMapsAreFilled_ is called after consuming all // the spectra, there will be no more spectra to append but the client // might already want to read after this call, so all data needs to be // present on disc and the file streams closed. // // TODO merge with destructor code into own function! 
while (!swath_consumers_.empty()) { delete swath_consumers_.back(); swath_consumers_.pop_back(); } if (ms1_consumer_ != NULL) { delete ms1_consumer_; ms1_consumer_ = NULL; } if (have_ms1) { boost::shared_ptr<MSExperiment<Peak1D> > exp(new MSExperiment<Peak1D>); String meta_file = cachedir_ + basename_ + "_ms1.mzML"; // write metadata to disk and store the correct data processing tag CachedmzML().writeMetadata(*ms1_map_, meta_file, true); MzMLFile().load(meta_file, *exp.get()); ms1_map_ = exp; } #ifdef _OPENMP #pragma omp parallel for #endif for (SignedSize i = 0; i < boost::numeric_cast<SignedSize>(swath_consumers_size); i++) { boost::shared_ptr<MSExperiment<Peak1D> > exp(new MSExperiment<Peak1D>); String meta_file = cachedir_ + basename_ + "_" + String(i) + ".mzML"; // write metadata to disk and store the correct data processing tag CachedmzML().writeMetadata(*swath_maps_[i], meta_file, true); MzMLFile().load(meta_file, *exp.get()); swath_maps_[i] = exp; } } MSDataCachedConsumer* ms1_consumer_; std::vector<MSDataCachedConsumer*> swath_consumers_; String cachedir_; String basename_; int nr_ms1_spectra_; std::vector<int> nr_ms2_spectra_; }; } #endif
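/*
 * Minimal, self-contained sketch of the window-matching strategy used by
 * consumeSpectrum() above: SWATH scans are grouped by the center of their
 * precursor isolation window, and (when no external boundaries are given)
 * an unseen center opens a new window. The struct and function names here
 * are illustrative only, not part of the OpenMS API.
 */
#include <cmath>
#include <cstddef>
#include <vector>

struct WindowSketch
{
  double lower, upper, center;
};

// Returns the index of the window whose center matches 'center' within
// 'tol' (mirroring the 1e-6 m/z comparison above); appends a new window
// when none matches.
inline std::size_t matchOrAddWindow(std::vector<WindowSketch>& windows,
                                    double lower, double upper, double center,
                                    double tol = 1e-6)
{
  for (std::size_t i = 0; i < windows.size(); i++)
  {
    if (std::fabs(center - windows[i].center) < tol) { return i; }
  }
  windows.push_back(WindowSketch{lower, upper, center});
  return windows.size() - 1;
}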
sumstats.h
/* Copyright (c) 2015-2016 Drew Schmidt All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef __COOP_LIB_SUMSTATS_H__ #define __COOP_LIB_SUMSTATS_H__ #include "safeomp.h" // x[*, j] -= colmean(x[*, j]) static inline void remove_colmeans(const int m, const int n, double *restrict x) { if (m == 0 || n == 0) return; const double div = 1. / ((double) m); #pragma omp parallel for shared(x) if(m*n > OMP_MIN_SIZE) for (int j=0; j<n; j++) { double colmean = 0; // Get column mean SAFE_SIMD for (int i=0; i<m; i++) colmean += x[i + m*j]; colmean *= div; // Remove mean from column SAFE_SIMD for (int i=0; i<m; i++) x[i + m*j] -= colmean; } } // same as above but return the means vector static inline void remove_colmeans_retmean(const int m, const int n, double *restrict x, double *restrict colmeans) { if (m == 0 || n == 0) return; const double div = 1. / ((double) m); #pragma omp parallel for shared(x, colmeans) if(m*n > OMP_MIN_SIZE) for (int j=0; j<n; j++) { colmeans[j] = 0; // Get column mean SAFE_SIMD for (int i=0; i<m; i++) colmeans[j] += x[i + m*j]; colmeans[j] *= div; // Remove mean from column SAFE_SIMD for (int i=0; i<m; i++) x[i + m*j] -= colmeans[j]; } } // compute the mean of a vector static inline double mean(const int n, const double * const restrict x) { const double divbyn = 1. / ((double) n); double mean = 0.; PLEASE_VECTORIZE for (int i=0; i<n; i++) mean += x[i]; return mean*divbyn; } #endif
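/*
 * Usage sketch for the helpers above, assuming "sumstats.h" and its
 * "safeomp.h" dependency (which defines OMP_MIN_SIZE and SAFE_SIMD) are on
 * the include path. Data is column-major, as in the functions above:
 * element (i, j) lives at x[i + m*j].
 */
#include <stdio.h>
#include "sumstats.h"

int main(void)
{
  /* 2 x 3 column-major matrix */
  double x[6] = {1., 3.,   2., 6.,   10., 30.};
  double colmeans[3];

  remove_colmeans_retmean(2, 3, x, colmeans);

  /* expected means: 2.0 4.0 20.0; each centered column now sums to 0 */
  for (int j = 0; j < 3; j++)
    printf("col %d: mean %.1f, centered (%.1f, %.1f)\n",
           j, colmeans[j], x[0 + 2*j], x[1 + 2*j]);
  return 0;
}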
little_wkld.c
/*
 * Code to simulate a "little"-core workload for lab assignment in [A2] Task Mapping on Soft Heterogeneous Systems.
 * Workload consists of a parallel implementation of matrix-vector multiplication (not optimized).
 * Implementation is not optimized! Only meant to be used in conjunction with lab assignment.
 *
 * Clock frequency has low impact on the performance of this workload. In a task mapping scenario,
 * this workload should be mapped to the "little" cores to minimize performance loss.
 *
 * @author: Apan Qasem <apan@txstate.edu>
 * @date: 04/02/20
 *
 * @update: 03/09/21
 */
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#include<unistd.h>
#include<omp.h>

#define VAL_RANGE 1024

/* timer function */
double get_time_in_seconds() {
  struct timeval tp;
  struct timezone tzp;
  int i;
  i = gettimeofday(&tp,&tzp);
  return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}

long dot_prod(long *x, long *y, long n) {
  long sum = 0;
  long i;
#pragma omp parallel for reduction(+:sum)
  for (i = 0; i < n; i++)
    sum += x[i] * y[i];
  return sum;
}

void matrix_vector_mult(long **mat, long *vec, long *result, long long rows, long long cols) {
  /* not parallelized to ensure runtimes are more meaningful */
  long i;
  for (i = 0; i < rows; i++)
    result[i] = dot_prod(mat[i], vec, cols);
}

void display_matrix(const long **matrix, long long N) {
  long i, j;
  for (i = 0; i < N; i++) {
    for (j = 0; j < N; j++)
      printf("%ld ", matrix[i][j]);
    printf("\n");
  }
}

int main(int argc, char *argv[]) {

  if (argc < 4) {
    printf("usage: \n");
    printf("       ./matvec N n t\n");
    printf("       N = matrix dimension\n");
    printf("       n = number of reps\n");
    printf("       t = number of threads\n");
    exit(0);
  }

  /* matrix dimension, assume N x N matrix and vector of size N */
  long long N = atoi(argv[1]);
  /* number of reps; control running time of program */
  unsigned n = atoi(argv[2]);
  /* number of OpenMP threads */
  unsigned threads = atoi(argv[3]);
  omp_set_num_threads(threads);

  long **matrix;
  long *vec;
  long *result;

  long i, j;
  double start_time, end_time;

  /* memory allocation and initialization */
#ifdef INIT_TIMER
  start_time = get_time_in_seconds();
#endif
  matrix = (long **) malloc(sizeof(long *) * N);
  for (i = 0; i < N; i++)
    matrix[i] = (long *) malloc(sizeof(long) * N);

  vec = (long *) malloc(sizeof(long) * N);
  result = (long *) malloc(sizeof(long) * N);

  for (i = 0; i < N; i++)
    for (j = 0; j < N; j++)
      matrix[i][j] = rand() / (long) (RAND_MAX/VAL_RANGE);

  for (i = 0; i < N; i++)
    vec[i] = rand() / (long) (RAND_MAX/VAL_RANGE);
#ifdef INIT_TIMER
  end_time = get_time_in_seconds();
  fprintf(stdout, "Initialization time = %.3f s\n", end_time - start_time);
#endif

  /* computation */
  start_time = get_time_in_seconds();
  for (i = 0; i < n; i++)
    matrix_vector_mult(matrix, vec, result, N, N);
  sleep(3);
  end_time = get_time_in_seconds();

  /* verification (by inspection only) */
#ifdef VERIFY
  fprintf(stdout, "Verification: ");
  for (unsigned i = 0; i < 1; i++)
    fprintf(stdout, "result[%u] = %ld\n", i, result[i]);
#endif

  fprintf(stdout, "\033[1;35m[wk1] compute time = %.3f s\n\033[0m", end_time - start_time);
  return 0;
}
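/*
 * Companion sketch for the task-mapping part of the lab: pin the process to
 * a chosen core set with sched_setaffinity(2) before launching the workload
 * (Linux-specific, requires _GNU_SOURCE). Which core IDs are the "little"
 * cores is platform-dependent; the IDs below are placeholders.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static int pin_to_cores(const int *cores, int n) {
  cpu_set_t set;
  CPU_ZERO(&set);
  for (int i = 0; i < n; i++)
    CPU_SET(cores[i], &set);
  /* pid 0 = the calling process */
  if (sched_setaffinity(0, sizeof(set), &set) != 0) {
    perror("sched_setaffinity");
    return -1;
  }
  return 0;
}

int main(void) {
  const int little[] = {0, 1};  /* placeholder core IDs */
  if (pin_to_cores(little, 2) == 0)
    printf("pinned to cores 0,1\n");
  return 0;
}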
MatrixProjection.c
/* ** Matrix Projection, ** Ref: Lohmann, Loktyushin, Stelzer, Scheffler (2018) bioRXiv ** ** G.Lohmann, MPI-KYB, Nov 2018 */ #include <viaio/Vlib.h> #include <viaio/VImage.h> #include <viaio/mu.h> #include <viaio/option.h> #include <gsl/gsl_matrix.h> #include <gsl/gsl_vector.h> #include <gsl/gsl_errno.h> #include <gsl/gsl_cblas.h> #include <gsl/gsl_rng.h> #include <gsl/gsl_randist.h> #include <gsl/gsl_linalg.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #ifdef _OPENMP #include <omp.h> #endif /*_OPENMP*/ #define SQR(x) ((x) * (x)) #define IMIN(x,y) ((x) < (y) ? (x) : (y)) #define IMAX(x,y) ((x) > (y) ? (x) : (y)) extern double ECMcorrelation(const float *arr1,const float *arr2,size_t nt,int type); extern void NormVec(float *x,size_t); void VMatrixProjection(gsl_matrix_float *X,float *ev,int type,int seed) { size_t i,j; size_t nvox = X->size1; size_t nt = X->size2; size_t p = 32; fprintf(stderr," VMatrixProjection...\n"); /* generate random matrix */ const gsl_rng_type *Tx = gsl_rng_default; gsl_rng *rx = gsl_rng_alloc (Tx); gsl_rng_set(rx,(unsigned long int)seed); gsl_matrix *R = gsl_matrix_calloc(nvox,p); for (i=0; i<nvox; i++) { for (j=0; j<p; j++) { gsl_matrix_set(R,i,j,gsl_ran_ugaussian(rx)); } } /* matrix product 1 */ gsl_matrix *Y = gsl_matrix_calloc(nvox,p); size_t progress=0; size_t step=4; /* sparse */ fprintf(stderr," pass 1:\n"); #pragma omp parallel for shared(progress) schedule(guided) firstprivate(X,Y,R) for (i=0; i<nvox; i++) { if (i%1000 == 0) fprintf(stderr," %d000 of %lu\r",(int)(++progress),nvox); size_t j=0,k=0; double *tmp1 = (double *)VCalloc(nvox,sizeof(double)); double *tmp2 = NULL; double sum=0; const float *arr1 = gsl_matrix_float_const_ptr(X,i,0); for (j=0; j<nvox; j++) { const float *arr2 = gsl_matrix_float_const_ptr(X,j,0); tmp1[j] = ECMcorrelation(arr1,arr2,nt,type); } for (j=0; j<p; j++) { tmp2 = gsl_matrix_ptr(R,j,0); sum=0; for (k=0; k<nvox; k+=step) { sum += tmp1[k]*tmp2[k]; } gsl_matrix_set(Y,i,j,sum); } VFree(tmp1); } fprintf(stderr,"\n"); gsl_matrix_free(R); /* get orthonormal basis for Y (SVD) */ gsl_vector *sv = gsl_vector_calloc(p); gsl_vector *work = gsl_vector_calloc(p); gsl_matrix *V = gsl_matrix_calloc(p,p); gsl_linalg_SV_decomp(Y,V,sv,work); /* matrix product 2 */ fprintf(stderr," pass 2:\n"); gsl_matrix *D = gsl_matrix_calloc(nvox,p); progress=0; #pragma omp parallel for shared(progress) schedule(guided) firstprivate(X,Y,D) for (i=0; i<nvox; i++) { if (i%1000 == 0) fprintf(stderr," %d000 of %lu\r",(int)(++progress),nvox); size_t j=0,k=0; double *tmp1 = (double *)VCalloc(nvox,sizeof(double)); double sum=0; const float *arr1 = gsl_matrix_float_const_ptr(X,i,0); for (j=0; j<nvox; j++) { const float *arr2 = gsl_matrix_float_const_ptr(X,j,0); tmp1[j] = ECMcorrelation(arr1,arr2,nt,type); } for (j=0; j<p; j++) { sum=0; for (k=0; k<nvox; k+=step) { sum += tmp1[k] * gsl_matrix_get(Y,k,j); } gsl_matrix_set(D,i,j,sum); } VFree(tmp1); } fprintf(stderr,"\n"); gsl_matrix_free(Y); /* final SVD */ gsl_linalg_SV_decomp(D,V,sv,work); /* output */ size_t k=0; for (i=0; i<nvox; i++) { ev[i] = gsl_matrix_get(D,i,0); if (ev[i] < 0) k++; } /* sign switch if needed */ if (k > nvox/2) { for (i=0; i<nvox; i++) { ev[i] = -ev[i]; } } /* normalize */ NormVec(ev,nvox); float kx = sqrt((float)(nvox)); for (i=0; i<nvox; i++) ev[i] *= kx; }
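/*
 * Standalone illustration of the strided accumulation ("sparse", step = 4)
 * used in both passes above: summing every step-th term trades accuracy for
 * roughly a step-fold speedup. The constant factor this introduces cancels
 * in the subsequent SVD, which is why the passes above do not rescale.
 */
#include <stdio.h>

int main(void)
{
  enum { N = 100000 };
  static double v[N];
  for (int k = 0; k < N; k++) v[k] = 1.0 + 0.001 * k;

  const int step = 4;
  double full = 0.0, strided = 0.0;
  for (int k = 0; k < N; k++)        full    += v[k];
  for (int k = 0; k < N; k += step)  strided += v[k];

  /* step * strided approximates full for slowly varying data */
  printf("full: %.1f  step*strided: %.1f\n", full, step * strided);
  return 0;
}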
population.h
#pragma once #include <algorithm> #include <atomic> #include <cassert> #include <cstdlib> #include <iostream> #include <functional> #include <memory> #include <vector> #ifdef _OPENMP #include <omp.h> #endif #include "random_wrapper.h" enum class Origin { Initialization = 0, Recombination = 1, Mutation = 2, }; static inline std::ostream& operator<< (std::ostream& stream, const Origin& o) { switch (o) { case Origin::Initialization: stream << "Initialization"; break; case Origin::Recombination: stream << "Recombination"; break; case Origin::Mutation: stream << "Mutation"; break; } return stream; } /** A data structure for representing the population in a genetic algorithm * * All functionality except non-modifying iteration and insertion of new * children is not thread safe. * * The template parameter `elem_t` is the type of the individuals in the * population. The following member types have to be defined: * - `elem_t::Fitness` for a representation of its fitness * - `elem_t::EvalInfo` for data that are required for evaluation */ template<typename elem_t> class Population { public: struct Entry { elem_t elem; typename elem_t::Fitness fitness_val; size_t accumulated_position = 0; // used for rank-combined sorting float accumulated_value = 0.0; // used for ratio-combined sorting bool evaluated = false; size_t birth_generation; Origin origin; Entry(size_t birth_generation, Origin origin) : birth_generation(birth_generation), origin(origin) { } void evaluate(const typename elem_t::EvalInfo &eval_info) { if (evaluated) { return; } elem.evaluate(this->fitness_val, eval_info); evaluated = true; } // friend bool operator< ( Entry const& a, Entry const& b) { // return a.fitness_val < b.fitness_val; // } friend std::ostream& operator<< (std::ostream& stream, const Entry& entry) { stream << entry.elem << "\n# with fitness value "; stream << entry.fitness_val; stream << "\n# created in generation " << entry.birth_generation << " from " << entry.origin; stream << "\n"; return stream; } }; using iterator_t = typename std::vector<Entry*>::iterator; static typename std::unique_ptr<Population<elem_t>> create(size_t pop_size, size_t child_num) { return std::unique_ptr<Population>(new Population(pop_size, child_num)); } ~Population(void) { // creepy things we have to do because of placement new for (size_t i = 0; i < first_free_pop; ++i) { Arena[i]->~Entry(); } for (size_t i = start_dead_zone; i < first_free_child; ++i) { Arena[i]->~Entry(); } free(Storage); } void shuffle(RandomWrapper &rw) { assert(finalized); rw.shuffle(Arena.begin(), Arena.begin() + first_free_pop); } /// Sort the entire population (children and residual individuals). /// Sorting works as follows: /// The fitness type is expected to define a set of groups of fitness /// orderings (based on serveral different fitness aspects). /// The population is sorted wrt. each of these fitness ordering groups and /// for each sorting, the respective position is annotated to the /// individuals. The final sorting is done wrt. the sums of positions. /// Effectively, individuals are sorted wrt. the arithmetic mean of their /// performance in all ordering groups. void rank_sort(void) { assert(finalized); assert(first_free_pop == start_dead_zone); auto end_it = Arena.begin() + first_free_child; // initialize the accumulated sum of all positions for each individual for (auto it = Arena.begin(); it != end_it; ++it) { (*it)->accumulated_position = 0; } for (int group_idx = 0; group_idx <= elem_t::Fitness::getMaxGroup(); ++group_idx) { // sort wrt. 
each ordering group... std::sort(Arena.begin(), end_it, [&](const auto& a, const auto &b){ return elem_t::Fitness::compare(a->fitness_val, b->fitness_val, group_idx) == -1; } ); // ...and accumulate the respective positions for each individual size_t idx = 0; for (auto it = Arena.begin(); it != end_it; ++it) { (*it)->accumulated_position += idx; ++idx; } } // final sorting according to the accumulated positions std::sort(Arena.begin(), end_it, [](const auto& a, const auto &b){ return a->accumulated_position < b->accumulated_position; } ); } /// Sort the entire population like rank_sort, however do not use the /// positions after sorting but use for each individual the sum of all /// component fitness values after applying a linear scale to a fixed /// interval. /// This puts more focus the magnitude of fitness improvements. void ratio_sort(void) { assert(finalized); assert(first_free_pop == start_dead_zone); auto end_it = Arena.begin() + first_free_child; // initialize the accumulated sum of all positions for each individual for (auto it = Arena.begin(); it != end_it; ++it) { (*it)->accumulated_value = 0.0; } float range_min = 1; float range_max = 1000; for (int group_idx = 0; group_idx <= elem_t::Fitness::getMaxGroup(); ++group_idx) { float max_val = 0.0; float min_val = 0.0; for (auto it = Arena.begin(); it != end_it; ++it) { float val = (*it)->fitness_val.getComponentValue(group_idx); max_val = std::max(max_val, val); min_val = std::min(min_val, val); } for (auto it = Arena.begin(); it != end_it; ++it) { float val = (*it)->fitness_val.getComponentValue(group_idx); float x; // apply a linear transform to map it into [range_min, range_max] if (max_val == min_val) { x = range_min; } else { x = (((range_max - range_min) * (val - min_val)) / (max_val - min_val)) + 1; } (*it)->accumulated_value += x; } } // final sorting according to the accumulated positions std::sort(Arena.begin(), end_it, [](const auto& a, const auto &b){ return a->accumulated_value < b->accumulated_value; } ); } void swap(size_t idx1, size_t idx2) { assert(finalized); assert(0 <= idx1 && idx1 < first_free_child); assert(0 <= idx2 && idx2 < first_free_child); auto tmp = Arena[idx1]; Arena[idx1] = Arena[idx2]; Arena[idx2] = tmp; } size_t getPopEndIdx(void) { return first_free_pop; } size_t getChildrenEndIdx(void) { return first_free_child; } void purge(void) { assert(finalized); // creepy things we have to do because of placement new for (size_t i = start_dead_zone; i < first_free_child; ++i) { Arena[i]->~Entry(); } first_free_child = start_dead_zone; current_generation += 1; } Entry *insertPop(Origin origin=Origin::Initialization) { assert(not finalized); size_t pos = first_free_pop.fetch_add(1); assert(pos < start_dead_zone); Entry *res = Arena[pos]; //placement new new (res) Entry(current_generation, origin); return res; } Entry *replacePop(size_t idx, Origin origin=Origin::Initialization) { auto* e = Arena[idx]; e->~Entry(); new (e) Entry(current_generation, origin); return e; } Entry *insertChild(Origin origin) { assert(finalized); size_t pos = first_free_child.fetch_add(1); assert(pos < num_elements); Entry *res = Arena[pos]; //placement new new (res) Entry(current_generation, origin); return res; } typename std::vector<Entry*>::iterator getPopBegin(void) { assert(finalized); return Arena.begin(); } typename std::vector<Entry*>::const_iterator getPopBegin(void) const { assert(finalized); return Arena.begin(); } typename std::vector<Entry*>::iterator getPopEnd(void) { assert(finalized); return Arena.begin() + 
      first_free_pop;
  }

  typename std::vector<Entry*>::const_iterator getPopEnd(void) const {
    assert(finalized);
    return Arena.begin() + first_free_pop;
  }

  void forall_entries(std::function<void(Entry&)> stmt) {
    size_t pop_size = first_free_pop;
#pragma omp parallel for
    for (size_t i = 0; i < pop_size; ++i) {
      auto &entry = this->getPopAt(i);
      stmt(entry);
    }
  }

  void forall_chunks(std::function<void(typename std::vector<Entry*>::iterator,
                                        typename std::vector<Entry*>::iterator)> stmt) {
    auto pop_begin = this->getPopBegin();
#pragma omp parallel for
    for (size_t i = 0; i < num_chunks; ++i) {
      // clamp both bounds so the last (possibly short) chunk stays valid
      size_t chunk_start = std::min(i * chunk_size, (size_t)first_free_pop);
      size_t chunk_end = std::min(chunk_start + chunk_size, (size_t)first_free_pop);
      auto start_it = pop_begin + chunk_start;
      auto end_it = pop_begin + chunk_end;
      stmt(start_it, end_it);
    }
  }

  Entry& getPopAt(size_t idx) {
    assert(finalized);
    assert((0 <= idx) && (idx < first_free_pop));
    return *Arena.at(idx);
  }

  /** Finalize construction phase of the initial population
   *
   * This checks several invariants that have to hold for the population at
   * any point after calling this function. This method has to be called
   * before new children can be added or iterated.
   */
  void finalize(void) {
    assert(num_elements > start_dead_zone);
    assert(first_free_pop == start_dead_zone);
    assert(first_free_child == start_dead_zone);
    assert(start_dead_zone > 0);
    current_generation += 1;
    finalized = true;
  }

  void printJournal(std::ostream& journal, int indent) {
    add_indent(journal, indent);
    journal << "[\n";
    bool first = true;
    for (auto it = this->getPopBegin(); it != this->getPopEnd(); ++it) {
      if (!first) {
        journal << ",\n";
      }
      first = false;  // only the first entry is printed without a separator
      add_indent(journal, indent + 2);
      journal << "\"";
      journal << (*it)->fitness_val;
      journal << "\"";
    }
    journal << "\n";
    add_indent(journal, indent);
    journal << "]\n";
  }

  double computeDiversity(void) const {
    double result = 0;
    auto pop_end = getPopEnd();
    for (auto it_a = getPopBegin(); it_a < pop_end; ++it_a) {
      for (auto it_b = it_a + 1; it_b < pop_end; ++it_b) {
        result += elem_t::distance((*it_a)->elem, (*it_b)->elem);
      }
    }
    return result / (double)first_free_pop;
  }

  size_t getCurrentGeneration(void) {
    return current_generation;
  }

private:
  Population(size_t pop_size, size_t child_num)
    : num_elements(pop_size + child_num), Arena(num_elements) {
    start_dead_zone = pop_size;
    Storage = (Entry*)malloc(num_elements * sizeof(Entry));
    for (size_t i = 0; i < num_elements; ++i) {
      Arena[i] = &Storage[i];
    }
    first_free_pop = 0;
    first_free_child = start_dead_zone;
#ifdef _OPENMP
    num_chunks = omp_get_max_threads();
#else
    num_chunks = 1;
#endif
    // round up so num_chunks * chunk_size covers the whole population
    chunk_size = (pop_size + num_chunks - 1) / num_chunks;
  }

  void add_indent(std::ostream& stream, int indent) {
    for (int i = 0; i < indent; ++i) {
      stream << " ";
    }
  }

  size_t num_elements;
  size_t start_dead_zone;

  std::atomic<size_t> first_free_pop;
  std::atomic<size_t> first_free_child;

  size_t num_chunks = 1;
  size_t chunk_size;

  bool finalized = false;
  size_t current_generation = 0;

  std::vector<Entry*> Arena;
  Entry *Storage;
};
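/*
 * Minimal usage sketch for the Population container above, with a toy
 * individual type. ToyElem and its quadratic objective are illustrative
 * assumptions, not part of this header; a real individual would carry a
 * genome and a problem-specific evaluation.
 */
#include <ostream>

struct ToyElem {
  double genome = 0.0;

  struct Fitness {
    double value = 0.0;
    // one ordering group (index 0)
    static int getMaxGroup() { return 0; }
    // -1 if a orders before b in the given group
    static int compare(const Fitness& a, const Fitness& b, int /*group*/) {
      return a.value < b.value ? -1 : (a.value > b.value ? 1 : 0);
    }
    float getComponentValue(int /*group*/) const { return (float)value; }
    friend std::ostream& operator<<(std::ostream& os, const Fitness& f) {
      return os << f.value;
    }
  };
  struct EvalInfo {};  // no shared evaluation data needed for the toy

  void evaluate(Fitness& out, const EvalInfo&) const {
    out.value = genome * genome;  // toy objective: minimize genome^2
  }
  static double distance(const ToyElem& a, const ToyElem& b) {
    return a.genome < b.genome ? b.genome - a.genome : a.genome - b.genome;
  }
  friend std::ostream& operator<<(std::ostream& os, const ToyElem& e) {
    return os << "genome=" << e.genome;
  }
};

inline void population_sketch() {
  auto pop = Population<ToyElem>::create(/*pop_size=*/8, /*child_num=*/8);
  for (int i = 0; i < 8; ++i) {
    // exactly pop_size insertions are required before finalize()
    Population<ToyElem>::Entry* e = pop->insertPop();
    e->elem.genome = i - 4;
  }
  pop->finalize();

  ToyElem::EvalInfo info;
  pop->forall_entries([&](Population<ToyElem>::Entry& entry) {
    entry.evaluate(info);
  });
  pop->rank_sort();  // lowest fitness value first
}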
elect_energy_avx512.c
#include <stdio.h> #include <string.h> #include <stdlib.h> #include <time.h> #include <math.h> #include <immintrin.h> /* gcc -o evec1 elect_energy_vec_01.c -O4 -lm -fopenmp -march=native */ int main(int argc, char **argv) { struct timespec ts_start, ts_end; float time_total; int i, j, m, ix, iy, iz; int size = 60; int n_charges = size*size*size; long v_element_count = 0; long v_count = 0; float scale=0.5; float tmp_vec[4][16] __attribute__ ((aligned (64))); double VC = .0f; __m512 *X, *Y, *Z, *Q; __m512 tmpQ[16], tmpX[16], tmpY[16], tmpZ[16]; __m512 r_vec, result, vcps, diff[16], mask[16]; X = aligned_alloc(64, (n_charges+16) * sizeof(float)); Y = aligned_alloc(64, (n_charges+16) * sizeof(float)); Z = aligned_alloc(64, (n_charges+16) * sizeof(float)); Q = aligned_alloc(64, (n_charges+16) * sizeof(float)); /* Initialize X,Y,Z,Q arrays with 512-bit long vectors */ for (ix=0; ix<size; ix++) for (iy=0; iy<size; iy++) for (iz=0; iz<size; iz++) { tmp_vec[0][v_element_count] = ix*scale; tmp_vec[1][v_element_count] = iy*scale; tmp_vec[2][v_element_count] = iz*scale; tmp_vec[3][v_element_count] = 0.33; v_element_count++; /* when 16 elements are computed pack them into _m512 vectors */ if ( v_element_count == 16 ) { X[v_count] = _mm512_set_ps( \ tmp_vec[0][15],tmp_vec[0][14],tmp_vec[0][13],tmp_vec[0][12], \ tmp_vec[0][11],tmp_vec[0][10],tmp_vec[0][9],tmp_vec[0][8], \ tmp_vec[0][7],tmp_vec[0][6],tmp_vec[0][5],tmp_vec[0][4], \ tmp_vec[0][3],tmp_vec[0][2],tmp_vec[0][1],tmp_vec[0][0]); Y[v_count] = _mm512_set_ps( \ tmp_vec[1][15],tmp_vec[1][14],tmp_vec[1][13],tmp_vec[1][12], \ tmp_vec[1][11],tmp_vec[1][10],tmp_vec[1][9],tmp_vec[1][8], \ tmp_vec[1][7],tmp_vec[1][6],tmp_vec[1][5],tmp_vec[1][4], \ tmp_vec[1][3],tmp_vec[1][2],tmp_vec[1][1],tmp_vec[1][0]); Z[v_count] = _mm512_set_ps( \ tmp_vec[2][15],tmp_vec[2][14],tmp_vec[2][13],tmp_vec[2][12], \ tmp_vec[2][11],tmp_vec[2][10],tmp_vec[2][9],tmp_vec[2][8], \ tmp_vec[2][7],tmp_vec[2][6],tmp_vec[2][5],tmp_vec[2][4], \ tmp_vec[2][3],tmp_vec[2][2],tmp_vec[2][1],tmp_vec[2][0]); Q[v_count] = _mm512_set_ps( \ tmp_vec[3][15],tmp_vec[3][14],tmp_vec[3][13],tmp_vec[3][12], \ tmp_vec[3][11],tmp_vec[3][10],tmp_vec[3][9],tmp_vec[3][8], \ tmp_vec[3][7],tmp_vec[3][6],tmp_vec[3][5],tmp_vec[3][4], \ tmp_vec[3][3],tmp_vec[3][2],tmp_vec[3][1],tmp_vec[3][0]); v_count++; v_element_count=0; memset(tmp_vec,0,64*sizeof(float)); } } /* Treat the remainder. 
The last vector is padded with zeros */ if ( v_element_count !=0 ) { X[v_count] = _mm512_set_ps( \ tmp_vec[0][15],tmp_vec[0][14],tmp_vec[0][13],tmp_vec[0][12], \ tmp_vec[0][11],tmp_vec[0][10],tmp_vec[0][9],tmp_vec[0][8], \ tmp_vec[0][7],tmp_vec[0][6],tmp_vec[0][5],tmp_vec[0][4], \ tmp_vec[0][3],tmp_vec[0][2],tmp_vec[0][1],tmp_vec[0][0]); Y[v_count] = _mm512_set_ps( \ tmp_vec[1][15],tmp_vec[1][14],tmp_vec[1][13],tmp_vec[1][12], \ tmp_vec[1][11],tmp_vec[1][10],tmp_vec[1][9],tmp_vec[1][8], \ tmp_vec[1][7],tmp_vec[1][6],tmp_vec[1][5],tmp_vec[1][4], \ tmp_vec[1][3],tmp_vec[1][2],tmp_vec[1][1],tmp_vec[1][0]); Z[v_count] = _mm512_set_ps( \ tmp_vec[2][15],tmp_vec[2][14],tmp_vec[2][13],tmp_vec[2][12], \ tmp_vec[2][11],tmp_vec[2][10],tmp_vec[2][9],tmp_vec[2][8], \ tmp_vec[2][7],tmp_vec[2][6],tmp_vec[2][5],tmp_vec[2][4], \ tmp_vec[2][3],tmp_vec[2][2],tmp_vec[2][1],tmp_vec[2][0]); Q[v_count] = _mm512_set_ps( \ tmp_vec[3][15],tmp_vec[3][14],tmp_vec[3][13],tmp_vec[3][12], \ tmp_vec[3][11],tmp_vec[3][10],tmp_vec[3][9],tmp_vec[3][8], \ tmp_vec[3][7],tmp_vec[3][6],tmp_vec[3][5],tmp_vec[3][4], \ tmp_vec[3][3],tmp_vec[3][2],tmp_vec[3][1],tmp_vec[3][0]); v_count++; } /* mask upper triangular matrix elements */ mask[0]=(__m512)_mm512_set_epi32 (-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 0); mask[1]=(__m512)_mm512_set_epi32 (-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 0, 0); mask[2]=(__m512)_mm512_set_epi32 (-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 0, 0, 0); mask[3]=(__m512)_mm512_set_epi32 (-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 0, 0, 0, 0); mask[4]=(__m512)_mm512_set_epi32 (-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 0, 0, 0, 0, 0); mask[5]=(__m512)_mm512_set_epi32 (-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 0, 0, 0, 0, 0, 0); mask[6]=(__m512)_mm512_set_epi32 (-1,-1,-1,-1,-1,-1,-1,-1,-1, 0, 0, 0, 0, 0, 0, 0); mask[7]=(__m512)_mm512_set_epi32 (-1,-1,-1,-1,-1,-1,-1,-1, 0, 0, 0, 0, 0, 0, 0, 0); mask[8]=(__m512)_mm512_set_epi32 (-1,-1,-1,-1,-1,-1,-1, 0, 0, 0, 0, 0, 0, 0, 0, 0); mask[9]=(__m512)_mm512_set_epi32 (-1,-1,-1,-1,-1,-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); mask[10]=(__m512)_mm512_set_epi32(-1,-1,-1,-1,-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); mask[11]=(__m512)_mm512_set_epi32(-1,-1,-1,-1, 0 ,0, 0 ,0 ,0, 0, 0, 0, 0, 0, 0, 0); mask[12]=(__m512)_mm512_set_epi32(-1,-1,-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); mask[13]=(__m512)_mm512_set_epi32(-1,-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); mask[14]=(__m512)_mm512_set_epi32(-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); mask[15]=(__m512)_mm512_set_epi32( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); clock_gettime(CLOCK_MONOTONIC, &ts_start); #pragma omp parallel for \ private(tmpQ,tmpX,tmpY,tmpZ,i,j,m,diff,r_vec,vcps,result)\ reduction(+:VC) schedule(dynamic) for(i=0; i<v_count; i++) { /* For each i prepare 16 - element X, Y, Z, and Q vectors */ tmpQ[0] = _mm512_set1_ps(Q[i][0]); tmpQ[1] = _mm512_set1_ps(Q[i][1]); tmpQ[2] = _mm512_set1_ps(Q[i][2]); tmpQ[3] = _mm512_set1_ps(Q[i][3]); tmpQ[4] = _mm512_set1_ps(Q[i][4]); tmpQ[5] = _mm512_set1_ps(Q[i][5]); tmpQ[6] = _mm512_set1_ps(Q[i][6]); tmpQ[7] = _mm512_set1_ps(Q[i][7]); tmpQ[8] = _mm512_set1_ps(Q[i][8]); tmpQ[9] = _mm512_set1_ps(Q[i][9]); tmpQ[10] = _mm512_set1_ps(Q[i][10]); tmpQ[11] = _mm512_set1_ps(Q[i][11]); tmpQ[12] = _mm512_set1_ps(Q[i][12]); tmpQ[13] = _mm512_set1_ps(Q[i][13]); tmpQ[14] = _mm512_set1_ps(Q[i][14]); tmpQ[15] = _mm512_set1_ps(Q[i][15]); tmpX[0] = _mm512_set1_ps(X[i][0]); tmpX[1] = _mm512_set1_ps(X[i][1]); tmpX[2] = _mm512_set1_ps(X[i][2]); tmpX[3] = _mm512_set1_ps(X[i][3]); tmpX[4] = _mm512_set1_ps(X[i][4]); 
tmpX[5] = _mm512_set1_ps(X[i][5]); tmpX[6] = _mm512_set1_ps(X[i][6]); tmpX[7] = _mm512_set1_ps(X[i][7]); tmpX[8] = _mm512_set1_ps(X[i][8]); tmpX[9] = _mm512_set1_ps(X[i][9]); tmpX[10] = _mm512_set1_ps(X[i][10]); tmpX[11] = _mm512_set1_ps(X[i][11]); tmpX[12] = _mm512_set1_ps(X[i][12]); tmpX[13] = _mm512_set1_ps(X[i][13]); tmpX[14] = _mm512_set1_ps(X[i][14]); tmpX[15] = _mm512_set1_ps(X[i][15]); tmpY[0] = _mm512_set1_ps(Y[i][0]); tmpY[1] = _mm512_set1_ps(Y[i][1]); tmpY[2] = _mm512_set1_ps(Y[i][2]); tmpY[3] = _mm512_set1_ps(Y[i][3]); tmpY[4] = _mm512_set1_ps(Y[i][4]); tmpY[5] = _mm512_set1_ps(Y[i][5]); tmpY[6] = _mm512_set1_ps(Y[i][6]); tmpY[7] = _mm512_set1_ps(Y[i][7]); tmpY[8] = _mm512_set1_ps(Y[i][8]); tmpY[9] = _mm512_set1_ps(Y[i][9]); tmpY[10] = _mm512_set1_ps(Y[i][10]); tmpY[11] = _mm512_set1_ps(Y[i][11]); tmpY[12] = _mm512_set1_ps(Y[i][12]); tmpY[13] = _mm512_set1_ps(Y[i][13]); tmpY[14] = _mm512_set1_ps(Y[i][14]); tmpY[15] = _mm512_set1_ps(Y[i][15]); tmpZ[0] = _mm512_set1_ps(Z[i][0]); tmpZ[1] = _mm512_set1_ps(Z[i][1]); tmpZ[2] = _mm512_set1_ps(Z[i][2]); tmpZ[3] = _mm512_set1_ps(Z[i][3]); tmpZ[4] = _mm512_set1_ps(Z[i][4]); tmpZ[5] = _mm512_set1_ps(Z[i][5]); tmpZ[6] = _mm512_set1_ps(Z[i][6]); tmpZ[7] = _mm512_set1_ps(Z[i][7]); tmpZ[8] = _mm512_set1_ps(Z[i][8]); tmpZ[9] = _mm512_set1_ps(Z[i][9]); tmpZ[10] = _mm512_set1_ps(Z[i][10]); tmpZ[11] = _mm512_set1_ps(Z[i][11]); tmpZ[12] = _mm512_set1_ps(Z[i][12]); tmpZ[13] = _mm512_set1_ps(Z[i][13]); tmpZ[14] = _mm512_set1_ps(Z[i][14]); tmpZ[15] = _mm512_set1_ps(Z[i][15]); /* Accumulate interactions between all lower triangular elements of the diagonal 16x16 blocks in the vector 'vcps' */ vcps = _mm512_setzero_ps(); for(m=0; m<16; m++) { /* compute dx,dy,dz */ diff[0] = _mm512_sub_ps(tmpX[m],X[i]); diff[1] = _mm512_sub_ps(tmpY[m],Y[i]); diff[2] = _mm512_sub_ps(tmpZ[m],Z[i]); /* compute dx*dx + dy*dy + dz*dz */ r_vec = _mm512_fmadd_ps(diff[0],diff[0],_mm512_setzero_ps()); r_vec = _mm512_fmadd_ps(diff[1],diff[1],r_vec); r_vec = _mm512_fmadd_ps(diff[2],diff[2],r_vec); /* compute reciprocal distance [m][i]*/ r_vec = _mm512_rsqrt14_ps(r_vec); /* compute Q[m]*Q[i]/distance */ result = _mm512_mul_ps(tmpQ[m],Q[i]); result = _mm512_mul_ps(result,r_vec); result = _mm512_and_ps(mask[m],result); vcps = _mm512_add_ps(vcps,result); } /* sum all elements of 'vcps' vector and transfer the result into double precision accumulator VC */ VC += _mm512_reduce_add_ps(vcps); /* Accumulate interactions between all elements of the lower triangular 16x16 blocks in the vector 'vcps' */ for(j=i+1 ; j<v_count; j++) { vcps = _mm512_setzero_ps(); for(m=0; m<16; m++) { diff[0] = _mm512_sub_ps(tmpX[m],X[j]); diff[1] = _mm512_sub_ps(tmpY[m],Y[j]); diff[2] = _mm512_sub_ps(tmpZ[m],Z[j]); r_vec = _mm512_fmadd_ps(diff[0],diff[0],_mm512_setzero_ps()); r_vec = _mm512_fmadd_ps(diff[1],diff[1],r_vec); r_vec = _mm512_fmadd_ps(diff[2],diff[2],r_vec); r_vec = _mm512_rsqrt14_ps(r_vec); result = _mm512_mul_ps(tmpQ[m],Q[j]); vcps = _mm512_fmadd_ps(result,r_vec,vcps); } VC += _mm512_reduce_add_ps(vcps); } } clock_gettime(CLOCK_MONOTONIC, &ts_end); time_total = (ts_end.tv_sec - ts_start.tv_sec)*1e9 + (ts_end.tv_nsec - ts_start.tv_nsec); printf("\nTotal time is %f ms, Energy is %f\n", time_total/1e6,VC); printf("%i\n", v_count); }
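/*
 * Scalar reference for validating the vectorized kernel above: the same
 * pairwise Coulomb sum in plain C, O(N^2), so only practical for small
 * sizes (e.g. size = 8). elect_energy_ref is an illustrative name; the
 * index decoding mirrors the ix/iy/iz initialization loop above.
 */
#include <math.h>

double elect_energy_ref(int size, float scale, float charge)
{
  const long n = (long)size * size * size;
  double vc = 0.0;
  for (long i = 0; i < n; i++) {
    const double xi = (double)(i / (size * size)) * scale;
    const double yi = (double)((i / size) % size) * scale;
    const double zi = (double)(i % size) * scale;
    for (long j = i + 1; j < n; j++) {
      const double xj = (double)(j / (size * size)) * scale;
      const double yj = (double)((j / size) % size) * scale;
      const double zj = (double)(j % size) * scale;
      const double dx = xi - xj, dy = yi - yj, dz = zi - zj;
      /* q_i * q_j / r_ij, each unique pair counted once */
      vc += (double)charge * charge / sqrt(dx*dx + dy*dy + dz*dz);
    }
  }
  return vc;
}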
GB_unop__identity_fc64_fc64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: (none) // op(A') function: GB_unop_tran__identity_fc64_fc64 // C type: GxB_FC64_t // A type: GxB_FC64_t // cast: GxB_FC64_t cij = aij // unaryop: cij = aij #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ GxB_FC64_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC64_t z = aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GxB_FC64_t *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = aij ; Cx [p] = z ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__identity_fc64_fc64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
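/*
 * Stripped of the GraphBLAS plumbing, the (disabled) dense apply phase above
 * reduces to the loop below: one cast-and-assign per entry, parallelized
 * over p. gb_identity and apply_unop_sketch are illustrative names, not part
 * of SuiteSparse:GraphBLAS; GxB_FC64_t corresponds to a C99 double complex.
 */
#include <complex.h>

static inline double complex gb_identity (double complex x) { return x ; }

void apply_unop_sketch
(
    double complex *Cx,         // output, may alias Ax
    const double complex *Ax,   // input
    long anz,                   // number of entries
    int nthreads
)
{
    long p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = gb_identity (Ax [p]) ;     // cij = op (cast (aij))
    }
}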
2mm.c
/* POLYBENCH/GPU-OPENMP
 *
 * This file is a part of the Polybench/GPU-OpenMP suite
 *
 * Contact:
 * William Killian <killian@udel.edu>
 *
 * Copyright 2013, The University of Delaware
 */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>

/* Include polybench common header. */
#include <polybench.h>

/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "2mm.h"

/* Array initialization. */
static
void init_array(int ni, int nj, int nk, int nl,
                DATA_TYPE *alpha,
                DATA_TYPE *beta,
                DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
                DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
                DATA_TYPE POLYBENCH_2D(C,NL,NJ,nl,nj),
                DATA_TYPE POLYBENCH_2D(D,NI,NL,ni,nl))
{
  int i __attribute__((annotate("scalar(range(0, " PB_XSTR(NK) ") final)")));
  int j __attribute__((annotate("scalar(range(0, " PB_XSTR(NL) ") final)")));

  *alpha = 32412;
  *beta = 2123;
  for (i = 0; i < ni; i++)
    for (j = 0; j < nk; j++)
      A[i][j] = ((DATA_TYPE) i*j) / ni;
  for (i = 0; i < nk; i++)
    for (j = 0; j < nj; j++)
      B[i][j] = ((DATA_TYPE) i*(j+1)) / nj;
  for (i = 0; i < nl; i++)
    for (j = 0; j < nj; j++)
      C[i][j] = ((DATA_TYPE) i*(j+3)) / nl;
  for (i = 0; i < ni; i++)
    for (j = 0; j < nl; j++)
      D[i][j] = ((DATA_TYPE) i*(j+2)) / nk;
}

/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nl,
                 DATA_TYPE POLYBENCH_2D(D,NI,NL,ni,nl))
{
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nl; j++) {
      fprintf (stderr, DATA_PRINTF_MODIFIER, D[i][j]);
      if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n");
    }
  fprintf (stderr, "\n");
}

/* Main computational kernel. The whole function will be timed,
   including the call and return. */
static
void kernel_2mm(int ni, int nj, int nk, int nl,
                DATA_TYPE alpha,
                DATA_TYPE beta,
                DATA_TYPE POLYBENCH_2D(tmp,NI,NJ,ni,nj),
                DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
                DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
                DATA_TYPE POLYBENCH_2D(C,NL,NJ,nl,nj),
                DATA_TYPE POLYBENCH_2D(D,NI,NL,ni,nl))
{
  int i, j, k;

#pragma scop
  /* D := alpha*A*B*C + beta*D */
  #pragma omp parallel
  {
    #pragma omp for private (j, k)
    for (i = 0; i < _PB_NI; i++)
      for (j = 0; j < _PB_NJ; j++)
      {
        tmp[i][j] = 0;
        for (k = 0; k < _PB_NK; ++k)
          tmp[i][j] += alpha * A[i][k] * B[k][j];
      }
    #pragma omp for private (j, k)
    for (i = 0; i < _PB_NI; i++)
      for (j = 0; j < _PB_NL; j++)
      {
        D[i][j] *= beta;
        for (k = 0; k < _PB_NJ; ++k)
          D[i][j] += tmp[i][k] * C[k][j];
      }
  }
#pragma endscop
}

int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;
  int nk = NK;
  int nl = NL;

  /* Variable declaration/allocation. */
  DATA_TYPE __attribute__((annotate("scalar()"))) alpha;
  DATA_TYPE __attribute__((annotate("scalar()"))) beta;
  POLYBENCH_2D_ARRAY_DECL(tmp,DATA_TYPE __attribute__((annotate("target('tmp') scalar(range(0, 30000000000000) final)"))),NI,NJ,ni,nj);
  POLYBENCH_2D_ARRAY_DECL(A,DATA_TYPE __attribute__((annotate("target('A') scalar()"))),NI,NK,ni,nk);
  POLYBENCH_2D_ARRAY_DECL(B,DATA_TYPE __attribute__((annotate("target('B') scalar()"))),NK,NJ,nk,nj);
  POLYBENCH_2D_ARRAY_DECL(C,DATA_TYPE __attribute__((annotate("target('C') scalar()"))),NL,NJ,nl,nj);
  POLYBENCH_2D_ARRAY_DECL(D,DATA_TYPE __attribute__((annotate("target('D') scalar(range(0, 16000000000000000000) final)"))),NI,NL,ni,nl);

  /* Initialize array(s). */
  init_array (ni, nj, nk, nl, &alpha, &beta,
              POLYBENCH_ARRAY(A),
              POLYBENCH_ARRAY(B),
              POLYBENCH_ARRAY(C),
              POLYBENCH_ARRAY(D));

  /* Start timer. */
  polybench_start_instruments;

  /* Run kernel.
*/ kernel_2mm (ni, nj, nk, nl, alpha, beta, POLYBENCH_ARRAY(tmp), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(D))); /* Be clean. */ POLYBENCH_FREE_ARRAY(tmp); POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); POLYBENCH_FREE_ARRAY(C); POLYBENCH_FREE_ARRAY(D); return 0; }
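/*
 * The same computation without the PolyBench macros, as a plain-C sequential
 * reference (a sketch; row-major flat arrays, names are illustrative). Note
 * that the kernel above reads C[k][j] with k < nj and j < nl, so C is
 * treated here as an nj x nl array.
 */
static void kernel_2mm_ref(int ni, int nj, int nk, int nl,
                           double alpha, double beta,
                           double *tmp, const double *A, const double *B,
                           const double *C, double *D)
{
  /* tmp = alpha * A * B   (tmp: ni x nj, A: ni x nk, B: nk x nj) */
  for (int i = 0; i < ni; i++)
    for (int j = 0; j < nj; j++) {
      tmp[i * nj + j] = 0.0;
      for (int k = 0; k < nk; ++k)
        tmp[i * nj + j] += alpha * A[i * nk + k] * B[k * nj + j];
    }

  /* D = tmp * C + beta * D   (D: ni x nl, C: nj x nl) */
  for (int i = 0; i < ni; i++)
    for (int j = 0; j < nl; j++) {
      D[i * nl + j] *= beta;
      for (int k = 0; k < nj; ++k)
        D[i * nl + j] += tmp[i * nj + k] * C[k * nl + j];
    }
}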
ransac.h
#pragma once

#include <algorithm>
#include <iostream>
#include <numeric>
#include <tuple>
#include <vector>

#include <misc3d/logging.h>
#include <misc3d/utils.h>
#include <open3d/geometry/PointCloud.h>
#include <Eigen/Core>

#define EPS 1.0e-8

namespace misc3d {

namespace common {

/**
 * @brief base primitives model
 *
 */
class Model {
public:
    Eigen::VectorXd parameters_;  // The parameters of the current model

    Model(const Eigen::VectorXd &parameters) : parameters_(parameters) {}

    Model &operator=(const Model &model) {
        parameters_ = model.parameters_;
        return *this;
    }

    Model() {}
};

/**
 * @brief the plane model is described as [a, b, c, d] => ax + by + cz + d = 0
 *
 */
class Plane : public Model {
public:
    Plane() : Model(Eigen::VectorXd(4)){};
    Plane(const Plane &model) {
        parameters_ = model.parameters_;
    }
    Plane &operator=(const Plane &model) {
        parameters_ = model.parameters_;
        return *this;
    }
};

/**
 * @brief the sphere model is described as [x, y, z, r], where the first three
 * entries are the center and the last one is the radius
 *
 */
class Sphere : public Model {
public:
    Sphere() : Model(Eigen::VectorXd(4)){};
    Sphere(const Sphere &model) {
        parameters_ = model.parameters_;
    }
    Sphere &operator=(const Sphere &model) {
        parameters_ = model.parameters_;
        return *this;
    }
};

/**
 * @brief the cylinder model is described as [x, y, z, nx, ny, nz, r], where
 * the first three entries are a point on the cylinder axis, the next three
 * are the direction vector of that axis, and the last one is the radius
 *
 */
class Cylinder : public Model {
public:
    Cylinder() : Model(Eigen::VectorXd(7)){};
    Cylinder(const Cylinder &model) {
        parameters_ = model.parameters_;
    }
    Cylinder &operator=(const Cylinder &model) {
        parameters_ = model.parameters_;
        return *this;
    }
};

class ModelEstimator {
protected:
    ModelEstimator(int minimal_sample) : minimal_sample_(minimal_sample) {}

    /**
     * @brief check whether the number of input points meets the minimal
     * requirement
     *
     * @param num
     * @return true
     * @return false
     */
    bool MinimalCheck(int num) const {
        return num >= minimal_sample_ ?
true : false; } public: /** * @brief fit model using least sample points * * @param pc * @param model * @return true * @return false */ virtual bool MinimalFit(const open3d::geometry::PointCloud &pc, Model &model) const = 0; /** * @brief fit model using least square method * * @param pc * @param model * @return true * @return false */ virtual bool GeneralFit(const open3d::geometry::PointCloud &pc, Model &model) const = 0; /** * @brief evaluate point distance to model to determine inlier&outlier * * @param query * @param model * @return double */ virtual double CalcPointToModelDistance(const Eigen::Vector3d &query, const Model &model) const = 0; public: int minimal_sample_; }; class PlaneEstimator : public ModelEstimator { public: PlaneEstimator() : ModelEstimator(3) {} bool MinimalFit(const open3d::geometry::PointCloud &pc, Model &model) const override { if (!MinimalCheck(pc.points_.size())) { return false; } const auto &points = pc.points_; const Eigen::Vector3d e0 = points[1] - points[0]; const Eigen::Vector3d e1 = points[2] - points[0]; Eigen::Vector3d abc = e0.cross(e1); const double norm = abc.norm(); // if the three points are co-linear, return invalid plane if (norm < EPS) { return false; } abc /= abc.norm(); const double d = -abc.dot(points[0]); model.parameters_(0) = abc(0); model.parameters_(1) = abc(1); model.parameters_(2) = abc(2); model.parameters_(3) = d; return true; } bool GeneralFit(const open3d::geometry::PointCloud &pc, Model &model) const override { const size_t num = pc.points_.size(); if (!MinimalCheck(num)) { return false; } const auto &points = pc.points_; Eigen::Vector3d mean(0, 0, 0); for (auto &p : points) { mean += p; } mean /= double(num); double xx = 0, xy = 0, xz = 0, yy = 0, yz = 0, zz = 0; #pragma omp parallel for reduction(+ : xx, xy, xz, yy, yz, zz) for (int i = 0; i < num; ++i) { const Eigen::Vector3d residual = points[i] - mean; xx += residual(0) * residual(0); xy += residual(0) * residual(1); xz += residual(0) * residual(2); yy += residual(1) * residual(1); yz += residual(1) * residual(2); zz += residual(2) * residual(2); } const double det_x = yy * zz - yz * yz; const double det_y = xx * zz - xz * xz; const double det_z = xx * yy - xy * xy; Eigen::Vector3d abc; if (det_x > det_y && det_x > det_z) { abc = Eigen::Vector3d(det_x, xz * yz - xy * zz, xy * yz - xz * yy); } else if (det_y > det_z) { abc = Eigen::Vector3d(xz * yz - xy * zz, det_y, xy * xz - yz * xx); } else { abc = Eigen::Vector3d(xy * yz - xz * yy, xy * xz - yz * xx, det_z); } const double norm = abc.norm(); if (norm < EPS) { return false; } abc /= norm; const double d = -abc.dot(mean); model.parameters_ = Eigen::Vector4d(abc(0), abc(1), abc(2), d); return true; } double CalcPointToModelDistance(const Eigen::Vector3d &query, const Model &model) const override { const Eigen::Vector4d p(query(0), query(1), query(2), 1); return std::abs(model.parameters_.transpose() * p) / model.parameters_.head<3>().norm(); } }; class SphereEstimator : public ModelEstimator { private: bool ValidationCheck(const open3d::geometry::PointCloud &pc) const { PlaneEstimator fit; Plane plane; const bool ret = fit.MinimalFit(pc, plane); if (!ret) { return false; } return fit.CalcPointToModelDistance(pc.points_[3], plane) < EPS ? 
false : true; } public: SphereEstimator() : ModelEstimator(4) {} bool MinimalFit(const open3d::geometry::PointCloud &pc, Model &model) const override { const auto &points = pc.points_; if (!MinimalCheck(points.size()) || !ValidationCheck(pc)) { return false; } Eigen::Matrix4d det_mat; det_mat.setOnes(4, 4); for (size_t i = 0; i < 4; i++) { det_mat(i, 0) = points[i](0); det_mat(i, 1) = points[i](1); det_mat(i, 2) = points[i](2); } const double M11 = det_mat.determinant(); for (size_t i = 0; i < 4; i++) { det_mat(i, 0) = points[i].transpose() * points[i]; det_mat(i, 1) = points[i](1); det_mat(i, 2) = points[i](2); } const double M12 = det_mat.determinant(); for (size_t i = 0; i < 4; i++) { det_mat(i, 0) = points[i].transpose() * points[i]; det_mat(i, 1) = points[i](0); det_mat(i, 2) = points[i](2); } const double M13 = det_mat.determinant(); for (size_t i = 0; i < 4; i++) { det_mat(i, 0) = points[i].transpose() * points[i]; det_mat(i, 1) = points[i](0); det_mat(i, 2) = points[i](1); } const double M14 = det_mat.determinant(); for (size_t i = 0; i < 4; i++) { det_mat(i, 0) = points[i].transpose() * points[i]; det_mat(i, 1) = points[i](0); det_mat(i, 2) = points[i](1); det_mat(i, 3) = points[i](2); } const double M15 = det_mat.determinant(); const Eigen::Vector3d center(0.5 * (M12 / M11), -0.5 * (M13 / M11), 0.5 * (M14 / M11)); const double radius = std::sqrt(center.transpose() * center - (M15 / M11)); model.parameters_(0) = center(0); model.parameters_(1) = center(1); model.parameters_(2) = center(2); model.parameters_(3) = radius; return true; } bool GeneralFit(const open3d::geometry::PointCloud &pc, Model &model) const override { const size_t num = pc.points_.size(); if (!MinimalCheck(num)) { return false; } const auto &o3d_points = pc.points_; Eigen::Matrix<double, 3, Eigen::Dynamic> points; VectorToEigenMatrix<double>(o3d_points, points); Eigen::Matrix<double, Eigen::Dynamic, 4> A; A.setOnes(num, 4); A.col(0) = points.row(0).transpose() * 2; A.col(1) = points.row(1).transpose() * 2; A.col(2) = points.row(2).transpose() * 2; Eigen::VectorXd b = (points.row(0).array().pow(2) + points.row(1).array().pow(2) + points.row(2).array().pow(2)) .matrix(); // TODO: dangerous when b is very large, which need large memory to // compute v. should be improved. const Eigen::Vector4d w = A.bdcSvd(Eigen::ComputeFullU | Eigen::ComputeFullV).solve(b); const double radius = sqrt(w(0) * w(0) + w(1) * w(1) + w(2) * w(2) + w(3)); model.parameters_(0) = w(0); model.parameters_(1) = w(1); model.parameters_(2) = w(2); model.parameters_(3) = radius; return true; } double CalcPointToModelDistance(const Eigen::Vector3d &query, const Model &model) const override { const Eigen::Vector3d center = model.parameters_.head<3>(); const double radius = model.parameters_(3); const double d = (query - center).norm(); if (d <= radius) { return radius - d; } else { return d - radius; } } }; /** * @brief Cylinder estimation reference from PCL implementation. 
 *
 */
class CylinderEstimator : public ModelEstimator {
public:
    CylinderEstimator() : ModelEstimator(2) {}

    bool MinimalFit(const open3d::geometry::PointCloud &pc,
                    Model &model) const override {
        if (!pc.HasNormals()) {
            misc3d::LogError("Cylinder estimation requires normals.");
            return false;
        }

        if (!MinimalCheck(pc.points_.size())) {
            return false;
        }

        const auto &points = pc.points_;
        const auto &normals = pc.normals_;

        // the two sample points must not coincide
        if (fabs(points[0](0) - points[1](0)) <=
                std::numeric_limits<double>::epsilon() &&
            fabs(points[0](1) - points[1](1)) <=
                std::numeric_limits<double>::epsilon() &&
            fabs(points[0](2) - points[1](2)) <=
                std::numeric_limits<double>::epsilon()) {
            return false;
        }

        const Eigen::Vector4d p1(points[0](0), points[0](1), points[0](2), 0);
        const Eigen::Vector4d p2(points[1](0), points[1](1), points[1](2), 0);

        const Eigen::Vector4d n1(normals[0](0), normals[0](1), normals[0](2), 0);
        const Eigen::Vector4d n2(normals[1](0), normals[1](1), normals[1](2), 0);
        const Eigen::Vector4d w = n1 + p1 - p2;

        const double a = n1.dot(n1);
        const double b = n1.dot(n2);
        const double c = n2.dot(n2);
        const double d = n1.dot(w);
        const double e = n2.dot(w);
        const double denominator = a * c - b * b;
        double sc, tc;
        if (denominator < 1e-8)  // The lines are almost parallel
        {
            sc = 0;
            tc = (b > c ? d / b : e / c);  // Use the largest denominator
        } else {
            sc = (b * e - c * d) / denominator;
            tc = (a * e - b * d) / denominator;
        }

        const Eigen::Vector4d line_pt = p1 + n1 + sc * n1;
        Eigen::Vector4d line_dir = p2 + tc * n2 - line_pt;
        line_dir.normalize();

        model.parameters_[0] = line_pt[0];
        model.parameters_[1] = line_pt[1];
        model.parameters_[2] = line_pt[2];
        model.parameters_[3] = line_dir[0];
        model.parameters_[4] = line_dir[1];
        model.parameters_[5] = line_dir[2];
        // cylinder radius
        model.parameters_[6] = CalcPoint2LineDistance<double>(
            points[0], line_pt.head<3>(), line_dir.head<3>());

        return true;
    }

    /**
     * @brief the general fit of the cylinder model is not implemented yet
     * TODO: 1. linear least square method. 2. nonlinear least square method.
     *
     * @param pc
     * @return true
     * @return false
     */
    bool GeneralFit(const open3d::geometry::PointCloud &pc,
                    Model &model) const override {
        // if (!MinimalCheck(points.cols())) {
        //     return false;
        // }
        return true;
    }

    double CalcPointToModelDistance(const Eigen::Vector3d &query,
                                    const Model &model) const override {
        const Eigen::Matrix<double, 7, 1> w = model.parameters_;
        const Eigen::Vector3d center(w(0), w(1), w(2));
        const Eigen::Vector3d ref(w(0) + w(3), w(1) + w(4), w(2) + w(5));
        double d = CalcPoint2LineDistance<double>(query, center, ref);
        return std::abs(d - w(6));
    }
};

/**
 * @brief RANSAC class for model fitting.
* * @tparam ModelEstimator * @tparam Model * @tparam Sampler */ template <class ModelEstimator, class Model, class Sampler> class RANSAC { public: RANSAC() : fitness_(0) , inlier_rmse_(0) , max_iteration_(1000) , probability_(0.9999) {} /** * @brief Set Point Cloud to be used for RANSAC * * @param points */ void SetPointCloud(const open3d::geometry::PointCloud &pc) { if (!pc_.HasPoints()) { pc_.Clear(); } pc_ = pc; } /** * @brief set probability to find the best model * * @param probability */ void SetProbability(double probability) { if (probability <= 0 || probability > 1) { misc3d::LogError("Probability must be > 0 or <= 1.0"); } probability_ = probability; } /** * @brief set maximum iteration, usually used if using parallel ransac * fitting * * @param num */ void SetMaxIteration(size_t num) { max_iteration_ = num; } /** * @brief fit model with given parameters * * @param threshold * @param model * @param inlier_indices * @return true * @return false */ bool FitModel(double threshold, Model &model, std::vector<size_t> &inlier_indices) { Clear(); const size_t num_points = pc_.points_.size(); if (num_points < estimator_.minimal_sample_) { misc3d::LogError("Can not fit model due to lack of points"); return false; } return FitModelParallel(threshold, model, inlier_indices); } private: void Clear() { fitness_ = 0; inlier_rmse_ = 0; } /** * @brief refine model using general fitting of estimator, usually is least * square method. * * @param threshold * @param model * @param inlier_indices * @return true * @return false */ bool RefineModel(double threshold, Model &model, std::vector<size_t> &inlier_indices) { inlier_indices.clear(); for (size_t i = 0; i < pc_.points_.size(); ++i) { const double d = estimator_.CalcPointToModelDistance(pc_.points_[i], model); if (d < threshold) { inlier_indices.emplace_back(i); } } // improve best model using general fitting const auto inliers_pc = pc_.SelectByIndex(inlier_indices); return estimator_.GeneralFit(*inliers_pc, model); } /** * @brief Ransac fitting method, the iteration number is varying * with inlier number in each iteration using multithreading. 
* * @param threshold * @param model * @param inlier_indices * @return true * @return false */ bool FitModelParallel(double threshold, Model &model, std::vector<size_t> &inlier_indices) { const size_t num_points = pc_.points_.size(); std::vector<size_t> indices_list(num_points); std::iota(std::begin(indices_list), std::end(indices_list), 0); Model best_model; size_t count = 0; size_t current_iteration = std::numeric_limits<size_t>::max(); RandomSampler<size_t> sampler(num_points); #pragma omp parallel for schedule(static) for (int i = 0; i < max_iteration_; ++i) { if (count > current_iteration) { continue; } const std::vector<size_t> sample_indices = sampler(estimator_.minimal_sample_); const auto sample = pc_.SelectByIndex(sample_indices); Model model_trial; bool ret; ret = estimator_.MinimalFit(*sample, model_trial); if (!ret) { continue; } const auto result = EvaluateModel(pc_.points_, threshold, model_trial); double fitness = std::get<0>(result); double inlier_rmse = std::get<1>(result); #pragma omp critical { // update model if satisfy both fitness and rmse check if (fitness > fitness_ || (fitness == fitness_ && inlier_rmse < inlier_rmse_)) { fitness_ = fitness; inlier_rmse_ = inlier_rmse; best_model = model_trial; if (fitness_ < 1.0) { current_iteration = std::min( log(1 - probability_) / log(1 - pow(fitness_, estimator_.minimal_sample_)), (double)max_iteration_); } else { // Set break_iteration to 0 to force to break the loop. current_iteration = 0; } } count++; } } misc3d::LogInfo( "Find best model with {}% inliers and run {} " "iterations", fitness_ * 100, count); const bool ret = RefineModel(threshold, best_model, inlier_indices); model = best_model; return ret; } std::tuple<double, double> EvaluateModel( const std::vector<Eigen::Vector3d> &points, double threshold, const Model &model) { size_t inlier_num = 0; double error = 0; for (size_t idx = 0; idx < points.size(); ++idx) { const double distance = estimator_.CalcPointToModelDistance(points[idx], model); if (distance < threshold) { error += distance; inlier_num++; } } double fitness; double inlier_rmse; if (inlier_num == 0) { fitness = 0; inlier_rmse = 1e+10; } else { fitness = (double)inlier_num / (double)points.size(); inlier_rmse = error / std::sqrt((double)inlier_num); } return std::make_tuple(fitness, inlier_rmse); } private: open3d::geometry::PointCloud pc_; double probability_; size_t max_iteration_; double fitness_; double inlier_rmse_; ModelEstimator estimator_; }; using RandomIndexSampler = RandomSampler<size_t>; using RANSACPlane = RANSAC<PlaneEstimator, Plane, RandomIndexSampler>; using RANSACShpere = RANSAC<SphereEstimator, Sphere, RandomIndexSampler>; using RANSACCylinder = RANSAC<CylinderEstimator, Cylinder, RandomIndexSampler>; } // namespace common } // namespace misc3d
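/*
 * Usage sketch for the aliases above (assuming Open3D is available and this
 * header is included): fit a plane to a lightly jittered z = 0 grid. The
 * threshold and grid are illustrative values only.
 */
#include <vector>

inline void ransac_plane_sketch() {
    open3d::geometry::PointCloud pc;
    for (int i = 0; i < 100; ++i) {
        for (int j = 0; j < 100; ++j) {
            // deterministic jitter of at most 3e-4 around z = 0
            const double z = ((i * 100 + j) % 7 - 3) * 1e-4;
            pc.points_.emplace_back(0.01 * i, 0.01 * j, z);
        }
    }

    misc3d::common::RANSACPlane fit;
    fit.SetPointCloud(pc);
    fit.SetProbability(0.99);

    misc3d::common::Plane plane;
    std::vector<size_t> inliers;
    if (fit.FitModel(/*threshold=*/0.01, plane, inliers)) {
        // plane.parameters_ is approximately (0, 0, +/-1, 0)
    }
}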
VectorReduction.h
////////////////////////////////////////////////////////////////////////////////
//                                                                            //
//  File Name: VectorReduction.h                                              //
//                                                                            //
//  Author: Andreas Buttenschoen <andreas@buttenschoen.ca>                    //
//  Created: 2018-03-30 12:22:20                                              //
//                                                                            //
////////////////////////////////////////////////////////////////////////////////
#ifndef CS_VECTOR_REDUCTION_H
#define CS_VECTOR_REDUCTION_H

#include <iostream>
#include <cmath>

using std::abs;
using std::max;

template <typename T>
T zero(T value)
{
    return T(0);
}

struct one_norm_functor
{
    template <typename Value>
    static inline void init(Value& value) { value = zero(value); }

    template <typename Value, typename Element>
    static inline void update(Value& value, const Element& x) { value += abs(x); }

    template <typename Value>
    static inline void finish(Value& value, const Value& value2) { value += value2; }

    template <typename Value>
    static inline Value post_reduction(const Value& value) { return value; }
};

struct sum_functor
{
    template <typename Value>
    static inline void init(Value& value) { value = zero(value); }

    template <typename Value, typename Element>
    static inline void update(Value& value, const Element& x) { value += x; }

    template <typename Value>
    static inline void finish(Value& value, const Value& value2) { value += value2; }

    template <typename Value>
    static inline Value post_reduction(const Value& value) { return value; }
};

struct product_functor
{
    template <typename Value>
    static inline void init(Value& value) { value = Value(1); }  // multiplicative identity; zero would absorb every product

    template <typename Value, typename Element>
    static inline void update(Value& value, const Element& x) { value *= x; }

    template <typename Value>
    static inline void finish(Value& value, const Value& value2) { value *= value2; }

    template <typename Value>
    static inline Value post_reduction(const Value& value) { return value; }
};

struct two_norm_functor
{
    template <typename Value>
    static inline void init(Value& value) { value = zero(value); }

    template <typename Value, typename Element>
    static inline void update(Value& value, const Element& x) { value += x * x; }

    template <typename Value>
    static inline void finish(Value& value, const Value& value2) { value += value2; }

    template <typename Value>
    static inline Value post_reduction(const Value& value)
    {
        Value (*sqrt) (const Value) = std::sqrt;
        return sqrt(value);
    }
};

struct unary_dot : two_norm_functor
{
    template <typename Value>
    static inline Value post_reduction(const Value& value) { return value; }
};

struct infinity_norm_functor
{
    template <typename Value>
    static inline void init(Value& value) { value = zero(value); }

    template <typename Value, typename Element>
    static inline void update(Value& value, const Element& x) { value = max(value, abs(x)); }

    template <typename Value>
    static inline void finish(Value& value, const Value& value2) { value = max(value, abs(value2)); }

    template <typename Value>
    static inline Value post_reduction(const Value& value) { return value; }
};

namespace impl {

template <unsigned long Index0, unsigned long Max0, typename Functor>
struct reduction
{
    using next = reduction<Index0 + 1, Max0, Functor>;

    template <typename Value>
    static inline void init(Value& tmp00, Value& tmp01, Value& tmp02, Value& tmp03,
                            Value& tmp04, Value& tmp05, Value& tmp06, Value& tmp07)
    {
        Functor::init(tmp00);
        next::init(tmp01, tmp02, tmp03, tmp04, tmp05, tmp06, tmp07, tmp00);
    }

    template <typename Value, typename Vector, typename Size>
    static inline void update(Value& tmp00, Value& tmp01, Value& tmp02, Value& tmp03,
                              Value& tmp04, Value& tmp05, Value& tmp06, Value& tmp07,
                              const Vector&
v, Size i) { Functor::update(tmp00, v[i + Index0]); next::update(tmp01, tmp02, tmp03, tmp04, tmp05, tmp06, tmp07, tmp00, v, i); } template <typename Value> static inline void finish(Value& tmp00, Value& tmp01, Value& tmp02, Value& tmp03, Value& tmp04, Value& tmp05, Value& tmp06, Value& tmp07) { next::finish(tmp01, tmp02, tmp03, tmp04, tmp05, tmp06, tmp07, tmp00); Functor::finish(tmp00, tmp01); } }; template <unsigned long Max0, typename Functor> struct reduction<Max0, Max0, Functor> { template <typename Value> static inline void init(Value& tmp00, Value&, Value&, Value&, Value&, Value&, Value&, Value&) { Functor::init(tmp00); } template <typename Value, typename Vector, typename Size> static inline void update(Value& tmp00, Value&, Value&, Value&, Value&, Value&, Value&, Value&, const Vector& v, Size i) { Functor::update(tmp00, v[i + Max0]); } template <typename Value> static inline void finish(Value&, Value&, Value&, Value&, Value&, Value&, Value&, Value&) {} }; } // end namespace template <unsigned long Unroll, typename Functor, typename Result> struct reduction { template <typename Vector> static inline Result apply(const Vector& v) { //using value_type = typename Vector::value_type; using size_type = typename Vector::size_type; Result result; Result tmp00, tmp01, tmp02, tmp03, tmp04, tmp05, tmp06, tmp07; constexpr size_type UNROLL = std::min(Unroll, size_type(8)); auto s = size(v); auto sb = s / UNROLL * UNROLL; Functor::init(result); //#pragma omp parallel { impl::reduction<0, UNROLL-1, Functor>::init(tmp00, tmp01, tmp02, tmp03, tmp04, tmp05, tmp06, tmp07); //#pragma omp for for (size_t i = 0; i < sb; i+=UNROLL) impl::reduction<0, UNROLL-1, Functor>::update(tmp00, tmp01, tmp02, tmp03, tmp04, tmp05, tmp06, tmp07, v, i); impl::reduction<0, UNROLL-1, Functor>::finish(tmp00, tmp01, tmp02, tmp03, tmp04, tmp05, tmp06, tmp07); //#pragma omp critical Functor::finish(result, tmp00); } for (size_t i = sb; i < s; i++) Functor::update(result, v[i]); return Functor::post_reduction(result); } }; namespace impl { template <unsigned long Index0, unsigned long Max0> struct dot_aux { using next = dot_aux<Index0 + 1, Max0>; template <typename Value, typename Vector1, typename Vector2, typename Size> static inline void apply(Value& tmp00, Value& tmp01, Value& tmp02, Value& tmp03, Value& tmp04, Value& tmp05, Value& tmp06, Value& tmp07, const Vector1& v1, const Vector2& v2, Size i) { tmp00 = std::fma(v1[i + Index0], v2[i + Index0], tmp00); next::apply(tmp01, tmp02, tmp03, tmp04, tmp05, tmp06, tmp07, tmp00, v1, v2, i); } }; template <unsigned long Max0> struct dot_aux<Max0, Max0> { template <typename Value, typename Vector1, typename Vector2, typename Size> static inline void apply(Value& tmp00, Value&, Value&, Value&, Value&, Value&, Value&, Value&, const Vector1& v1, const Vector2& v2, Size i) { tmp00 = std::fma(v1[i + Max0], v2[i + Max0], tmp00); } }; template <unsigned long Unroll> struct dot { template <typename Vector1, typename Vector2> static inline auto apply(const Vector1& v1, const Vector2& v2) { using value_type = typename Vector1::value_type; using size_type = typename Vector1::size_type; value_type z = value_type(0); value_type result = z; constexpr size_type UNROLL = std::min(Unroll, size_type(8)); const size_type N = size(v1); const size_type no_loops = N / UNROLL; //#pragma omp parallel { value_type tmp00 = z, tmp01 = z, tmp02 = z, tmp03 = z, tmp04 = z, tmp05 = z, tmp06 = z, tmp07 = z; //#pragma omp for for (size_type i = 0; i < no_loops; i+=UNROLL) dot_aux<0, UNROLL-1>::apply(tmp00, tmp01, 
tmp02, tmp03, tmp04, tmp05, tmp06, tmp07, v1, v2, i); //#pragma omp critical result += ((tmp00 + tmp01) + (tmp02 + tmp03)) + ((tmp04 + tmp05) + (tmp06 + tmp07)); } for (size_type i = UNROLL * no_loops; i < N; i++) result = std::fma(v1[i], v2[i], result); return result; } }; } // end namespace template <unsigned long Unroll, typename Vector1, typename Vector2> inline auto dot(const Vector1& v1, const Vector2& v2) { return impl::dot<Unroll>::apply(v1, v2); } #endif
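// --- Usage sketch (not part of the header above): a minimal driver assuming
// C++17, the header above in scope, and a platform where
// std::vector<double>::size_type is unsigned long. std::vector satisfies the
// implicit vector concept here: value_type, size_type, operator[] and an
// ADL-visible size() (found as std::size).
#include <vector>
#include <cstdio>

int main()
{
    std::vector<double> a(1000, 0.5), b(1000, 2.0);
    double nrm2 = reduction<8, two_norm_functor, double>::apply(a); // sqrt(1000 * 0.25)
    double sum  = reduction<8, sum_functor, double>::apply(a);      // 500
    double d    = dot<8>(a, b);                                     // 1000 * 0.5 * 2.0
    std::printf("two-norm %g, sum %g, dot %g\n", nrm2, sum, d);
    return 0;
}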
ZQ_CNN_BBoxUtils.h
#ifndef _ZQ_CNN_BBOX_UTILS_H_
#define _ZQ_CNN_BBOX_UTILS_H_
#pragma once
#include "ZQ_CNN_BBox.h"
#include <string>
#include <math.h>
#include <stdlib.h>
#include <algorithm>

namespace ZQ
{
    class ZQ_CNN_BBoxUtils
    {
    public:
        enum PriorBoxCodeType {
            PriorBoxCodeType_CORNER = 0,
            PriorBoxCodeType_CORNER_SIZE,
            PriorBoxCodeType_CENTER_SIZE
        };

        static bool _cmp_score(const ZQ_CNN_OrderScore& lsh, const ZQ_CNN_OrderScore& rsh) {
            return lsh.score < rsh.score;
        }

        static void _nms(std::vector<ZQ_CNN_BBox>& boundingBox,
            std::vector<ZQ_CNN_OrderScore>& bboxScore, const float overlap_threshold,
            const std::string& modelname = "Union", int overlap_count_thresh = 0,
            int thread_num = 1)
        {
            if (boundingBox.empty() || overlap_threshold >= 1.0) {
                return;
            }
            std::vector<int> heros;
            std::vector<int> overlap_num;
            // sort by score
            sort(bboxScore.begin(), bboxScore.end(), _cmp_score);
            int order = 0;
            float IOU = 0;
            float maxX = 0;
            float maxY = 0;
            float minX = 0;
            float minY = 0;
            while (bboxScore.size() > 0) {
                order = bboxScore.back().oriOrder;
                bboxScore.pop_back();
                if (order < 0)
                    continue;
                heros.push_back(order);
                int cur_overlap = 0;
                boundingBox[order].exist = false; // delete it
                int box_num = (int)boundingBox.size();
                if (thread_num == 1) {
                    for (int num = 0; num < box_num; num++) {
                        if (boundingBox[num].exist) {
                            // the iou
                            maxY = (float)__max(boundingBox[num].row1, boundingBox[order].row1);
                            maxX = (float)__max(boundingBox[num].col1, boundingBox[order].col1);
                            minY = (float)__min(boundingBox[num].row2, boundingBox[order].row2);
                            minX = (float)__min(boundingBox[num].col2, boundingBox[order].col2);
                            // maxX1 and maxY1 reuse
                            maxX = __max(minX - maxX + 1, 0);
                            maxY = __max(minY - maxY + 1, 0);
                            // IOU reuse for the area of two bbox
                            IOU = maxX * maxY;
                            float area1 = boundingBox[num].area;
                            float area2 = boundingBox[order].area;
                            if (!modelname.compare("Union"))
                                IOU = IOU / (area1 + area2 - IOU);
                            else if (!modelname.compare("Min"))
                                IOU = IOU / __min(area1, area2);
                            if (IOU > overlap_threshold) {
                                cur_overlap++;
                                boundingBox[num].exist = false;
                                for (std::vector<ZQ_CNN_OrderScore>::iterator it = bboxScore.begin(); it != bboxScore.end(); it++) {
                                    if ((*it).oriOrder == num) {
                                        (*it).oriOrder = -1;
                                        break;
                                    }
                                }
                            }
                        }
                    }
                }
                else {
                    // Cast before dividing: the integer division would truncate
                    // and make ceil() a no-op. Also keep the chunk size at least 1.
                    int chunk_size = (int)ceil((double)box_num / thread_num);
                    if (chunk_size < 1)
                        chunk_size = 1;
                    // The IoU scratch variables are declared inside the loop so
                    // each thread gets its own copies; cur_overlap is combined
                    // with a reduction instead of a racy shared increment.
#pragma omp parallel for schedule(static, chunk_size) num_threads(thread_num) reduction(+:cur_overlap)
                    for (int num = 0; num < box_num; num++) {
                        if (boundingBox.at(num).exist) {
                            // the iou
                            float maxY = (float)__max(boundingBox[num].row1, boundingBox[order].row1);
                            float maxX = (float)__max(boundingBox[num].col1, boundingBox[order].col1);
                            float minY = (float)__min(boundingBox[num].row2, boundingBox[order].row2);
                            float minX = (float)__min(boundingBox[num].col2, boundingBox[order].col2);
                            maxX = __max(minX - maxX + 1, 0);
                            maxY = __max(minY - maxY + 1, 0);
                            float IOU = maxX * maxY;
                            float area1 = boundingBox[num].area;
                            float area2 = boundingBox[order].area;
                            if (!modelname.compare("Union"))
                                IOU = IOU / (area1 + area2 - IOU);
                            else if (!modelname.compare("Min"))
                                IOU = IOU / __min(area1, area2);
                            if (IOU > overlap_threshold) {
                                cur_overlap++;
                                boundingBox.at(num).exist = false;
                                for (std::vector<ZQ_CNN_OrderScore>::iterator it = bboxScore.begin(); it != bboxScore.end(); it++) {
                                    if ((*it).oriOrder == num) {
                                        (*it).oriOrder = -1;
                                        break;
                                    }
                                }
                            }
                        }
                    }
                }
                overlap_num.push_back(cur_overlap);
            }
            for (int i = 0; i < (int)heros.size(); i++) {
                if (!boundingBox[heros[i]].need_check_overlap_count
                    || overlap_num[i] >= overlap_count_thresh)
                    boundingBox[heros[i]].exist = true;
            }
            // erase the boxes left with exist == false
            for (int i = (int)(boundingBox.size() - 1); i >= 0; i--) {
                if (!boundingBox[i].exist) {
                    boundingBox.erase(boundingBox.begin() + i);
                }
            }
        }

        static void _refine_and_square_bbox(std::vector<ZQ_CNN_BBox>& vecBbox,
            const int width, const int height, bool square = true)
        {
            float bbw = 0, bbh = 0, bboxSize = 0;
            float h = 0, w = 0;
            float x1 = 0, y1 = 0, x2 = 0, y2 = 0;
            for (std::vector<ZQ_CNN_BBox>::iterator it = vecBbox.begin(); it != vecBbox.end(); it++) {
                if ((*it).exist) {
                    bbh = (float)((*it).row2 - (*it).row1 + 1);
                    bbw = (float)((*it).col2 - (*it).col1 + 1);
                    y1 = (*it).row1 + (*it).regreCoord[1] * bbh;
                    x1 = (*it).col1 + (*it).regreCoord[0] * bbw;
                    y2 = (*it).row2 + (*it).regreCoord[3] * bbh;
                    x2 = (*it).col2 + (*it).regreCoord[2] * bbw;
                    w = x2 - x1 + 1;
                    h = y2 - y1 + 1;
                    if (square) {
                        float scale_h = h * it->scale_y;
                        float scale_w = w * it->scale_x;
                        bboxSize = (scale_h > scale_w) ? scale_h : scale_w;
                        y1 = y1 + h * 0.5f - bboxSize / it->scale_y * 0.5f;
                        x1 = x1 + w * 0.5f - bboxSize / it->scale_x * 0.5f;
                        (*it).row2 = (int)round(y1 + bboxSize / it->scale_y - 1);
                        (*it).col2 = (int)round(x1 + bboxSize / it->scale_x - 1);
                        (*it).row1 = (int)round(y1);
                        (*it).col1 = (int)round(x1);
                    }
                    else {
                        (*it).row2 = (int)round(y1 + h - 1);
                        (*it).col2 = (int)round(x1 + w - 1);
                        (*it).row1 = (int)round(y1);
                        (*it).col1 = (int)round(x1);
                    }
                    // boundary check
                    /*if ((*it).row1 < 0)(*it).row1 = 0;
                    if ((*it).col1 < 0)(*it).col1 = 0;
                    if ((*it).row2 > height)(*it).row2 = height - 1;
                    if ((*it).col2 > width)(*it).col2 = width - 1;*/
                    it->area = (float)(it->row2 - it->row1) * (it->col2 - it->col1);
                }
            }
        }

        static void _square_bbox(std::vector<ZQ_CNN_BBox>& vecBbox,
            const int width, const int height)
        {
            float bbw = 0, bbh = 0, bboxSize = 0;
            float h = 0, w = 0;
            float x1 = 0, y1 = 0, x2 = 0, y2 = 0;
            for (std::vector<ZQ_CNN_BBox>::iterator it = vecBbox.begin(); it != vecBbox.end(); it++) {
                if ((*it).exist) {
                    h = (float)((*it).row2 - (*it).row1 + 1);
                    w = (float)((*it).col2 - (*it).col1 + 1);
                    y1 = (float)(*it).row1;
                    x1 = (float)(*it).col1;
                    float scale_h = h * it->scale_y;
                    float scale_w = w * it->scale_x;
                    bboxSize = (scale_h > scale_w) ? scale_h : scale_w;
                    y1 = y1 + h * 0.5f - bboxSize / it->scale_y * 0.5f;
                    x1 = x1 + w * 0.5f - bboxSize / it->scale_x * 0.5f;
                    (*it).row2 = (int)round(y1 + bboxSize / it->scale_y - 1);
                    (*it).col2 = (int)round(x1 + bboxSize / it->scale_x - 1);
                    (*it).row1 = (int)round(y1);
                    (*it).col1 = (int)round(x1);
                    // boundary check
                    /*if ((*it).row1 < 0)(*it).row1 = 0;
                    if ((*it).col1 < 0)(*it).col1 = 0;
                    if ((*it).row2 > height)(*it).row2 = height - 1;
                    if ((*it).col2 > width)(*it).col2 = width - 1;*/
                    it->area = (float)(it->row2 - it->row1) * (it->col2 - it->col1);
                }
            }
        }

        static bool DecodeBBoxesAll(const std::vector<ZQ_CNN_LabelBBox>& all_loc_preds,
            const std::vector<ZQ_CNN_NormalizedBBox>& prior_bboxes,
            const std::vector<std::vector<float> >& prior_variances,
            const int num, const bool share_location,
            const int num_loc_classes, const int background_label_id,
            const PriorBoxCodeType code_type, const bool variance_encoded_in_target,
            const bool clip, std::vector<ZQ_CNN_LabelBBox>* all_decode_bboxes)
        {
            if ((int)all_loc_preds.size() != num)
                return false;
            all_decode_bboxes->clear();
            all_decode_bboxes->resize(num);
            for (int i = 0; i < num; ++i) {
                // Decode predictions into bboxes.
                ZQ_CNN_LabelBBox& decode_bboxes = (*all_decode_bboxes)[i];
                for (int c = 0; c < num_loc_classes; ++c) {
                    int label = share_location ? -1 : c;
                    if (label == background_label_id) {
                        // Ignore background class.
continue; } if (all_loc_preds[i].find(label) == all_loc_preds[i].end()) { // Something bad happened if there are no predictions for current label. //LOG(FATAL) << "Could not find location predictions for label " << label; } const std::vector<ZQ_CNN_NormalizedBBox>& label_loc_preds = all_loc_preds[i].find(label)->second; if (!DecodeBBoxes(prior_bboxes, prior_variances, code_type, variance_encoded_in_target, clip, label_loc_preds, &(decode_bboxes[label]))) return false; } } return true; } static bool DecodeBBoxes( const std::vector<ZQ_CNN_NormalizedBBox>& prior_bboxes, const std::vector<std::vector<float> >& prior_variances, const PriorBoxCodeType code_type, const bool variance_encoded_in_target, const bool clip_bbox, const std::vector<ZQ_CNN_NormalizedBBox>& bboxes, std::vector<ZQ_CNN_NormalizedBBox>* decode_bboxes) { if (prior_bboxes.size() != prior_variances.size()) return false; if (prior_bboxes.size() != bboxes.size()) return false; int num_bboxes = (int)prior_bboxes.size(); if (num_bboxes >= 1) { if (prior_variances[0].size() != 4) return false; } decode_bboxes->clear(); for (int i = 0; i < num_bboxes; ++i) { ZQ_CNN_NormalizedBBox decode_bbox; if (!DecodeBBox(prior_bboxes[i], prior_variances[i], code_type, variance_encoded_in_target, clip_bbox, bboxes[i], &decode_bbox)) return false; decode_bboxes->push_back(decode_bbox); } return true; } static bool DecodeBBox( const ZQ_CNN_NormalizedBBox& prior_bbox, const std::vector<float>& prior_variance, const PriorBoxCodeType code_type, const bool variance_encoded_in_target, const bool clip_bbox, const ZQ_CNN_NormalizedBBox& bbox, ZQ_CNN_NormalizedBBox* decode_bbox) { if (code_type == PriorBoxCodeType_CORNER) { if (variance_encoded_in_target) { // variance is encoded in target, we simply need to add the offset // predictions. decode_bbox->col1 = prior_bbox.col1 + bbox.col1; decode_bbox->col2 = prior_bbox.col2 + bbox.col2; decode_bbox->row1 = prior_bbox.row1 + bbox.row1; decode_bbox->row2 = prior_bbox.row2 + bbox.row2; } else { // variance is encoded in bbox, we need to scale the offset accordingly. decode_bbox->col1 = prior_bbox.col1 + prior_variance[0] * bbox.col1; decode_bbox->row1 = prior_bbox.row1 + prior_variance[1] * bbox.row1; decode_bbox->col2 = prior_bbox.col2 + prior_variance[2] * bbox.col2; decode_bbox->row2 = prior_bbox.row2 + prior_variance[3] * bbox.row2; } } else if (code_type == PriorBoxCodeType_CENTER_SIZE) { float prior_width = prior_bbox.col2 - prior_bbox.col1; if (prior_width < 0) { // return false; printf("x = [%f , %f]\n", prior_bbox.col1, prior_bbox.col2); } float prior_height = prior_bbox.row2 - prior_bbox.row1; if (prior_height < 0) { //return false; printf("y = [%f , %f]\n", prior_bbox.row1, prior_bbox.row2); } float prior_center_x = (prior_bbox.col1 + prior_bbox.col2) / 2.f; float prior_center_y = (prior_bbox.row1 + prior_bbox.row2) / 2.f; float decode_bbox_center_x, decode_bbox_center_y; float decode_bbox_width, decode_bbox_height; if (variance_encoded_in_target) { // variance is encoded in target, we simply need to retore the offset // predictions. decode_bbox_center_x = bbox.col1 * prior_width + prior_center_x; decode_bbox_center_y = bbox.row1 * prior_height + prior_center_y; decode_bbox_width = exp(bbox.col2) * prior_width; decode_bbox_height = exp(bbox.row2) * prior_height; } else { // variance is encoded in bbox, we need to scale the offset accordingly. 
decode_bbox_center_x = prior_variance[0] * bbox.col1 * prior_width + prior_center_x; decode_bbox_center_y = prior_variance[1] * bbox.row1 * prior_height + prior_center_y; decode_bbox_width = exp(prior_variance[2] * bbox.col2) * prior_width; decode_bbox_height = exp(prior_variance[3] * bbox.row2) * prior_height; } decode_bbox->col1 = decode_bbox_center_x - decode_bbox_width / 2.f; decode_bbox->row1 = decode_bbox_center_y - decode_bbox_height / 2.f; decode_bbox->col2 = decode_bbox_center_x + decode_bbox_width / 2.f; decode_bbox->row2 = decode_bbox_center_y + decode_bbox_height / 2.f; } else if (code_type == PriorBoxCodeType_CORNER_SIZE) { float prior_width = prior_bbox.col2 - prior_bbox.col1; if (prior_width < 0) { //return false; printf("x = [%f , %f]\n", prior_bbox.col1, prior_bbox.col2); } float prior_height = prior_bbox.row2 - prior_bbox.row1; if (prior_height < 0) { // return false; printf("y = [%f , %f]\n", prior_bbox.row1, prior_bbox.row2); } if (variance_encoded_in_target) { // variance is encoded in target, we simply need to add the offset // predictions. decode_bbox->col1 = prior_bbox.col1 + bbox.col1 * prior_width; decode_bbox->row1 = prior_bbox.row1 + bbox.row1 * prior_height; decode_bbox->col2 = prior_bbox.col2 + bbox.col2 * prior_width; decode_bbox->row2 = prior_bbox.row2 + bbox.row2 * prior_height; } else { // variance is encoded in bbox, we need to scale the offset accordingly. decode_bbox->col1 = prior_bbox.col1 + prior_variance[0] * bbox.col1 * prior_width; decode_bbox->row1 = prior_bbox.row1 + prior_variance[1] * bbox.row1 * prior_height; decode_bbox->col2 = prior_bbox.col2 + prior_variance[2] * bbox.col2 * prior_width; decode_bbox->row2 = prior_bbox.row2 + prior_variance[3] * bbox.row2 * prior_height; } } else { printf("unknown code type\n"); return false; } float bbox_size = BBoxSize(*decode_bbox, true); decode_bbox->size = bbox_size; if (clip_bbox) { ClipBBox(*decode_bbox, decode_bbox); } return true; } static float BBoxSize(const ZQ_CNN_NormalizedBBox& bbox, const bool normalized) { if (bbox.col2 < bbox.col1 || bbox.row2 < bbox.row1) { // If bbox is invalid (e.g. xmax < xmin or ymax < ymin), return 0. return 0; } else { float width = bbox.col2 - bbox.col1; float height = bbox.row2 - bbox.row1; if (normalized) { return width * height; } else { // If bbox is not within range [0, 1]. return (width + 1) * (height + 1); } } } static void ClipBBox(const ZQ_CNN_NormalizedBBox& bbox, ZQ_CNN_NormalizedBBox* clip_bbox) { clip_bbox->col1 = __max(__min(bbox.col1, 1.f), 0.f); clip_bbox->row1 = __max(__min(bbox.row1, 1.f), 0.f); clip_bbox->col2 = __max(__min(bbox.col2, 1.f), 0.f); clip_bbox->row2 = __max(__min(bbox.row2, 1.f), 0.f); clip_bbox->size = BBoxSize(*clip_bbox, true); clip_bbox->difficult = bbox.difficult; } static bool GetLocPredictions(const float* loc_data, const int num, const int num_preds_per_class, const int num_loc_classes, const bool share_location, std::vector<ZQ_CNN_LabelBBox>* loc_preds) { loc_preds->clear(); if (share_location) { if (num_loc_classes != 1) return false; } loc_preds->resize(num); for (int i = 0; i < num; i++) { ZQ_CNN_LabelBBox& label_bbox = (*loc_preds)[i]; for (int p = 0; p < num_preds_per_class; p++) { int start_idx = p * num_loc_classes * 4; for (int c = 0; c < num_loc_classes; c++) { int label = share_location ? 
-1 : c; if (label_bbox.find(label) == label_bbox.end()) { label_bbox[label].resize(num_preds_per_class); } label_bbox[label][p].col1 = loc_data[start_idx + c * 4]; label_bbox[label][p].row1 = loc_data[start_idx + c * 4 + 1]; label_bbox[label][p].col2 = loc_data[start_idx + c * 4 + 2]; label_bbox[label][p].row2 = loc_data[start_idx + c * 4 + 3]; } } loc_data += num_preds_per_class * num_loc_classes * 4; } return true; } static void TransformLocations_MXNET(float *out, const float *anchors, const float *loc_pred, const bool clip, const float vx, const float vy, const float vw, const float vh) { // transform predictions to detection results float al = anchors[0]; float at = anchors[1]; float ar = anchors[2]; float ab = anchors[3]; float aw = ar - al; float ah = ab - at; float ax = (al + ar) / 2.f; float ay = (at + ab) / 2.f; float px = loc_pred[0]; float py = loc_pred[1]; float pw = loc_pred[2]; float ph = loc_pred[3]; float ox = px * vx * aw + ax; float oy = py * vy * ah + ay; float ow = exp(pw * vw) * aw / 2; float oh = exp(ph * vh) * ah / 2; out[0] = clip ? __max(0, __min(1, ox - ow)) : (ox - ow); out[1] = clip ? __max(0, __min(1, oy - oh)) : (oy - oh); out[2] = clip ? __max(0, __min(1, ox + ow)) : (ox + ow); out[3] = clip ? __max(0, __min(1, oy + oh)) : (oy + oh); } static void GetConfidenceScores(const float* conf_data, const int num, const int num_preds_per_class, const int num_classes, std::vector<std::map<int, std::vector<float> > >* conf_preds) { conf_preds->clear(); conf_preds->resize(num); for (int i = 0; i < num; ++i) { std::map<int, std::vector<float> >& label_scores = (*conf_preds)[i]; for (int p = 0; p < num_preds_per_class; ++p) { int start_idx = p * num_classes; for (int c = 0; c < num_classes; ++c) { label_scores[c].push_back(conf_data[start_idx + c]); } } conf_data += num_preds_per_class * num_classes; } } static void GetConfidenceScores(const float* conf_data, const int num, const int num_preds_per_class, const int num_classes, const bool class_major, std::vector<std::map<int, std::vector<float> > >* conf_preds) { conf_preds->clear(); conf_preds->resize(num); for (int i = 0; i < num; ++i) { std::map<int, std::vector<float> >& label_scores = (*conf_preds)[i]; if (class_major) { for (int c = 0; c < num_classes; ++c) { label_scores[c].assign(conf_data, conf_data + num_preds_per_class); conf_data += num_preds_per_class; } } else { for (int p = 0; p < num_preds_per_class; ++p) { int start_idx = p * num_classes; for (int c = 0; c < num_classes; ++c) { label_scores[c].push_back(conf_data[start_idx + c]); } } conf_data += num_preds_per_class * num_classes; } } } static void GetPriorBBoxes(const float* prior_data, const int num_priors, std::vector<ZQ_CNN_NormalizedBBox>* prior_bboxes, std::vector<std::vector<float> >* prior_variances) { prior_bboxes->clear(); prior_variances->clear(); for (int i = 0; i < num_priors; ++i) { int start_idx = i * 4; ZQ_CNN_NormalizedBBox bbox; bbox.col1 = prior_data[start_idx]; bbox.row1 = prior_data[start_idx + 1]; bbox.col2 = prior_data[start_idx + 2]; bbox.row2 = prior_data[start_idx + 3]; float bbox_size = BBoxSize(bbox, true); bbox.size = bbox_size; prior_bboxes->push_back(bbox); } for (int i = 0; i < num_priors;i++) { int start_idx = (num_priors + i) * 4; std::vector<float> var; for (int j = 0; j < 4; ++j) { var.push_back(prior_data[start_idx + j]); } prior_variances->push_back(var); } } static bool ApplyNMSFast(const std::vector<ZQ_CNN_NormalizedBBox>& bboxes, const std::vector<float>& scores, const float score_threshold, const float 
nms_threshold, const float eta, const int top_k, std::vector<int>* indices) { // Sanity check. if (bboxes.size() != scores.size()) { printf("bboxes and scores have different size.\n"); return false; } // Get top_k scores (with corresponding indices). std::vector<std::pair<float, int> > score_index_vec; GetMaxScoreIndex(scores, score_threshold, top_k, &score_index_vec); // Do nms. float adaptive_threshold = nms_threshold; indices->clear(); while (score_index_vec.size() != 0) { const int idx = score_index_vec.front().second; bool keep = true; for (int k = 0; k < indices->size(); ++k) { if (keep) { const int kept_idx = (*indices)[k]; float overlap = JaccardOverlap(bboxes[idx], bboxes[kept_idx], true); keep = overlap <= adaptive_threshold; } else { break; } } if (keep) { indices->push_back(idx); } score_index_vec.erase(score_index_vec.begin()); if (keep && eta < 1 && adaptive_threshold > 0.5) { adaptive_threshold *= eta; } } return true; } static void GetMaxScoreIndex(const std::vector<float>& scores, const float threshold, const int top_k, std::vector<std::pair<float, int> >* score_index_vec) { // Generate index score pairs. for (int i = 0; i < scores.size(); ++i) { if (scores[i] > threshold) { score_index_vec->push_back(std::make_pair(scores[i], i)); } } // Sort the score pair according to the scores in descending order std::stable_sort(score_index_vec->begin(), score_index_vec->end(), SortScorePairDescend<int>); // Keep top_k scores if needed. if (top_k > -1 && top_k < score_index_vec->size()) { score_index_vec->resize(top_k); } } template <typename T> static bool SortScorePairDescend(const std::pair<float, T>& pair1, const std::pair<float, T>& pair2) { return pair1.first > pair2.first; } static float JaccardOverlap(const ZQ_CNN_NormalizedBBox& bbox1, const ZQ_CNN_NormalizedBBox& bbox2, const bool normalized) { ZQ_CNN_NormalizedBBox intersect_bbox; IntersectBBox(bbox1, bbox2, &intersect_bbox); float intersect_width, intersect_height; if (normalized) { intersect_width = intersect_bbox.col2 - intersect_bbox.col1; intersect_height = intersect_bbox.row2 - intersect_bbox.row1; } else { intersect_width = intersect_bbox.col2 - intersect_bbox.col1 + 1; intersect_height = intersect_bbox.row2 - intersect_bbox.row1 + 1; } if (intersect_width > 0 && intersect_height > 0) { float intersect_size = intersect_width * intersect_height; float bbox1_size = BBoxSize(bbox1, true); float bbox2_size = BBoxSize(bbox2, true); return intersect_size / (bbox1_size + bbox2_size - intersect_size); } else { return 0.; } } static void IntersectBBox(const ZQ_CNN_NormalizedBBox& bbox1, const ZQ_CNN_NormalizedBBox& bbox2, ZQ_CNN_NormalizedBBox* intersect_bbox) { if (bbox2.col1 > bbox1.col2 || bbox2.col2 < bbox1.col1 || bbox2.row1 > bbox1.row2 || bbox2.row2 < bbox1.row1) { // Return [0, 0, 0, 0] if there is no intersection. intersect_bbox->col1 = 0; intersect_bbox->row1 = 0; intersect_bbox->col2 = 0; intersect_bbox->row2 = 0; } else { intersect_bbox->col1 = __max(bbox1.col1, bbox2.col1); intersect_bbox->row1 = __max(bbox1.row1, bbox2.row1); intersect_bbox->col2 = __min(bbox1.col2, bbox2.col2); intersect_bbox->row2 = __min(bbox1.row2, bbox2.row2); } } }; } #endif
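// --- Sketch (not part of the header above): JaccardOverlap/IntersectBBox
// boil down to interval arithmetic on normalized corners. Box below is a
// hypothetical stand-in for ZQ_CNN_NormalizedBBox, and the function mirrors
// the "Union" mode used by _nms and ApplyNMSFast.
#include <algorithm>
#include <cstdio>

struct Box { float x1, y1, x2, y2; };

static float iou(const Box& a, const Box& b)
{
    float iw = std::min(a.x2, b.x2) - std::max(a.x1, b.x1);
    float ih = std::min(a.y2, b.y2) - std::max(a.y1, b.y1);
    if (iw <= 0.f || ih <= 0.f) return 0.f;        // empty intersection
    float inter  = iw * ih;
    float area_a = (a.x2 - a.x1) * (a.y2 - a.y1);
    float area_b = (b.x2 - b.x1) * (b.y2 - b.y1);
    return inter / (area_a + area_b - inter);      // intersection over union
}

int main()
{
    Box a{0.1f, 0.1f, 0.5f, 0.5f};
    Box b{0.3f, 0.3f, 0.7f, 0.7f};
    std::printf("IoU = %f\n", iou(a, b));          // 0.04 / 0.28 = 0.142857
    return 0;
}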
nested_parallel_tasking.c
// RUN: %libomp-compile-and-run
#include <stdio.h>
#include <omp.h>

/*
 * This test would hang when the level, instead of the active level,
 * was used to push task state.
 */

int main() {
  // If num_threads is changed to a value greater than 1, then the test passes
  #pragma omp parallel num_threads(1)
  {
    #pragma omp parallel
    printf("Hello World from thread %d\n", omp_get_thread_num());
  }

  printf("omp_num_threads: %d\n", omp_get_max_threads());

  #pragma omp parallel
  {
    #pragma omp master
    #pragma omp task default(none)
    {
      printf("%d is executing this task\n", omp_get_thread_num());
    }
  }

  printf("pass\n");
  return 0;
}
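// --- Sketch (not part of the test above): the inner region in the test stays
// serialized because only one parallelism level is active by default. Making
// it actually fork requires raising the active-level limit; assuming an
// OpenMP 3.0+ runtime that provides omp_set_max_active_levels.
#include <stdio.h>
#include <omp.h>

int main() {
  omp_set_max_active_levels(2);   // allow two active (forked) nesting levels
  #pragma omp parallel num_threads(2)
  {
    int outer = omp_get_thread_num();
    #pragma omp parallel num_threads(2)
    printf("outer %d, inner %d, level %d, active level %d\n",
           outer, omp_get_thread_num(),
           omp_get_level(), omp_get_active_level());
  }
  return 0;
}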
SpMat.h
/****************************************************************************** * ** Copyright (c) 2016, Intel Corporation ** * ** All rights reserved. ** * ** ** * ** Redistribution and use in source and binary forms, with or without ** * ** modification, are permitted provided that the following conditions ** * ** are met: ** * ** 1. Redistributions of source code must retain the above copyright ** * ** notice, this list of conditions and the following disclaimer. ** * ** 2. Redistributions in binary form must reproduce the above copyright ** * ** notice, this list of conditions and the following disclaimer in the ** * ** documentation and/or other materials provided with the distribution. ** * ** 3. Neither the name of the copyright holder nor the names of its ** * ** contributors may be used to endorse or promote products derived ** * ** from this software without specific prior written permission. ** * ** ** * ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ** * ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ** * ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ** * ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ** * ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ** * ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED ** * ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ** * ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ** * ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ** * ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ** * ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * ******************************************************************************/ /* Narayanan Sundaram (Intel Corp.), Michael Anderson (Intel Corp.) 
* * ******************************************************************************/ #ifndef SRC_SPMAT_H_ #define SRC_SPMAT_H_ #include <string> #include <algorithm> #include "src/TileOps.h" #include "binary_search.h" template <typename T> bool compare_tile_id(const tedge_t<T>& a, const tedge_t<T>& b) { if (a.tile_id < b.tile_id) return true; return false; } template <typename SpTile> class SpMat { public: SpTile** tiles; int* start_idx; int* start_idy; int* nodeIds; int ntiles_x; int ntiles_y; int m; int n; bool empty; std::string name; int num_tiles_x; int num_tiles_y; int (*pfn)(int, int, int, int, int); SpMat() { empty = true; } void set(int _m, int _n, int _ntiles_x, int _ntiles_y, int* _nodeIds, int* _start_idx, int* _start_idy) { /* if(global_myrank == 0) { for(int i = 0 ; i < _ntiles_y + 1 ; i++) { std::cout << "_start_idy[" << i << "]: " << _start_idy[i] << std::endl; } for(int i = 0 ; i < _ntiles_x + 1 ; i++) { std::cout << "_start_idx[" << i << "]: " << _start_idx[i] << std::endl; } } */ empty = false; // Copy metadata start_idx = reinterpret_cast<int*>(_mm_malloc((_ntiles_x + 1) * sizeof(int), 64)); start_idy = reinterpret_cast<int*>(_mm_malloc((_ntiles_y + 1) * sizeof(int), 64)); nodeIds = reinterpret_cast<int*>( _mm_malloc((_ntiles_x * _ntiles_y) * sizeof(int), 64)); memcpy(start_idx, _start_idx, (_ntiles_x + 1) * sizeof(int)); memcpy(start_idy, _start_idy, (_ntiles_y + 1) * sizeof(int)); memcpy(nodeIds, _nodeIds, (_ntiles_x * _ntiles_y) * sizeof(int)); m = _m; n = _n; ntiles_x = _ntiles_x; ntiles_y = _ntiles_y; assert(ntiles_x > 0); assert(ntiles_y > 0); MPI_Barrier(MPI_COMM_WORLD); // Allocate space for tiles tiles = new SpTile* [ntiles_y]; for (int i = 0; i < ntiles_y; i++) { tiles[i] = new SpTile[ntiles_x]; } // Set metadata for (int tile_j = 0; tile_j < ntiles_x; tile_j++) { for (int tile_i = 0; tile_i < ntiles_y; tile_i++) { int tile_m = start_idy[tile_i + 1] - start_idy[tile_i]; int tile_n = start_idx[tile_j + 1] - start_idx[tile_j]; tiles[tile_i][tile_j] = SpTile(tile_m, tile_n); std::stringstream ss; ss << "Created_" << tile_i << "," << tile_j; tiles[tile_i][tile_j].name = ss.str(); } } } inline int getPartition(const int src, const int dst, int* ival, int* jval) const { (*ival) = -1; (*jval) = -1; for (int i = 0; i < ntiles_y; i++) { if ((src > start_idy[i]) && (src <= start_idy[i + 1])) { (*ival) = i; break; } } for (int j = 0; j < ntiles_x; j++) { if ((dst > start_idx[j]) && (dst <= start_idx[j + 1])) { (*jval) = j; break; } } if ((*ival) == -1 || (*jval) == -1) { printf("%d %d == -1\n", src, dst); return -1; } return (*ival) + (*jval) * ntiles_y; } template <typename T> void ingestEdgelist(edgelist_t<T> blob) { int nnz_l = blob.nnz; edge_t<T>* edge_list = blob.edges; int m = blob.m; int n = blob.n; printf("Rank %d: Before shuffle %d edges\n", global_myrank, blob.nnz); edge_t<T> * received_edges; unsigned long int new_nnz = 0; if(global_nrank == 1) { new_nnz = nnz_l; received_edges = new edge_t<T>[new_nnz]; memcpy(received_edges, edge_list, new_nnz * sizeof(edge_t<T>)); } else { tedge_t<T> * tedges = new tedge_t<T>[nnz_l]; #pragma omp parallel for for(unsigned long i = 0 ; i < nnz_l ; i++) { tedges[i].src = edge_list[i].src; tedges[i].dst = edge_list[i].dst; tedges[i].val = edge_list[i].val; int ival, jval; int tile_id = getPartition(edge_list[i].src, edge_list[i].dst, &ival, &jval); assert(tile_id != -1); tedges[i].tile_id = nodeIds[ival + jval * ntiles_y]; } __gnu_parallel::sort(tedges, tedges + nnz_l, compare_tile_id<T>); int * assignment = new int[nnz_l]; 
#pragma omp parallel for for(unsigned long i = 0 ; i < nnz_l ; i++) { edge_list[i].src = tedges[i].src; edge_list[i].dst = tedges[i].dst; edge_list[i].val = tedges[i].val; assignment[i] = tedges[i].tile_id; } delete [] tedges; unsigned long int * positions = new unsigned long[global_nrank+1]; unsigned long int * counts = new unsigned long[global_nrank]; unsigned long int * recv_positions = new unsigned long[global_nrank+1]; unsigned long int * recv_counts = new unsigned long[global_nrank]; unsigned long int current_count = 0; for(int i = 0 ; i < global_nrank ; i++) { int point = binary_search_right_border(assignment, i, 0, nnz_l, nnz_l); if(point == -1) { counts[i] = 0; positions[i] = current_count; } else { counts[i] = (point+1) - current_count; positions[i] = current_count; current_count = (point+1); } if(global_myrank == 0) { std::cout << "point: " << point << "\t" << counts[i] << std::endl; } } positions[global_nrank] = nnz_l; MPI_Barrier(MPI_COMM_WORLD); delete [] assignment; MPI_Request* mpi_req = new MPI_Request[2 * global_nrank]; MPI_Status* mpi_status = new MPI_Status[2 * global_nrank]; for (int i = 0; i < global_nrank; i++) { MPI_Isend(&counts[i], 1, MPI_UNSIGNED_LONG, i, global_myrank, MPI_COMM_WORLD, &mpi_req[i]); } for (int i = 0; i < global_nrank; i++) { MPI_Irecv(&recv_counts[i], 1, MPI_UNSIGNED_LONG, i, i, MPI_COMM_WORLD, &mpi_req[i + global_nrank]); } MPI_Waitall(2 * global_nrank, mpi_req, mpi_status); MPI_Barrier(MPI_COMM_WORLD); recv_positions[0] = 0; for(int i = 0 ; i < global_nrank ; i++) { new_nnz += recv_counts[i]; recv_positions[i+1] = new_nnz; } printf("Rank %d: After shuffle %d edges\n", global_myrank, new_nnz); MPI_Datatype MPI_EDGE_T; MPI_Type_contiguous(sizeof(edge_t<T>), MPI_CHAR, &MPI_EDGE_T); MPI_Type_commit(&MPI_EDGE_T); for (int i = 0; i < global_nrank; i++) { MPI_Isend(edge_list + positions[i], counts[i] , MPI_EDGE_T, i, global_myrank, MPI_COMM_WORLD, &mpi_req[i]); } received_edges = new edge_t<T>[new_nnz]; for (int i = 0; i < global_nrank; i++) { MPI_Irecv(received_edges + recv_positions[i], recv_counts[i] , MPI_EDGE_T, i, i, MPI_COMM_WORLD, &mpi_req[i+global_nrank]); } MPI_Waitall(2 * global_nrank, mpi_req, mpi_status); MPI_Barrier(MPI_COMM_WORLD); } printf("Rank %d: After shuffle %d edges\n", global_myrank, new_nnz); tedge_t<T> * tedges2 = new tedge_t<T>[new_nnz]; #pragma omp parallel for for(unsigned long i = 0 ; i < new_nnz ; i++) { tedges2[i].src = received_edges[i].src; tedges2[i].dst = received_edges[i].dst; tedges2[i].val = received_edges[i].val; int ival, jval; tedges2[i].tile_id = getPartition(received_edges[i].src, received_edges[i].dst, &ival, &jval); assert(tedges2[i].tile_id != -1); } __gnu_parallel::sort(tedges2, tedges2 + new_nnz , compare_tile_id<T>); int * assignment2 = new int[new_nnz]; #pragma omp parallel for for(unsigned long i = 0 ; i < new_nnz ; i++) { received_edges[i].src = tedges2[i].src; received_edges[i].dst = tedges2[i].dst; received_edges[i].val = tedges2[i].val; assignment2[i] = tedges2[i].tile_id; } delete [] tedges2; for (int tile_j = 0; tile_j < ntiles_x; tile_j++) { for (int tile_i = 0; tile_i < ntiles_y; tile_i++) { if (nodeIds[tile_i + tile_j * ntiles_y] == global_myrank) { int tile_m = start_idy[tile_i + 1] - start_idy[tile_i]; int tile_n = start_idx[tile_j + 1] - start_idx[tile_j]; int this_tile_id = tile_i + tile_j * ntiles_y; // Find left and right int start_nz = binary_search_left_border(assignment2, this_tile_id, 0, new_nnz, new_nnz); int end_nz = binary_search_right_border(assignment2, this_tile_id, 0, 
new_nnz, new_nnz) + 1; int nnz = 0; if((start_nz != -1) && (end_nz != -1)) { nnz = end_nz - start_nz; } if (nnz <= 0) { tiles[tile_i][tile_j] = SpTile(tile_m, tile_n); std::stringstream ss; ss << "LoadedEmpty_" << tile_i << "," << tile_j; tiles[tile_i][tile_j].name = ss.str(); } else { tiles[tile_i][tile_j] = SpTile(received_edges + start_nz, tile_m, tile_n, nnz, start_idy[tile_i], start_idx[tile_j]); std::stringstream ss; ss << "Loaded_" << tile_i << "," << tile_j; tiles[tile_i][tile_j].name = ss.str(); } } } } delete [] assignment2; delete [] received_edges; MPI_Barrier(MPI_COMM_WORLD); } void print_tiles(std::string msg, int output_rank) { MPI_Barrier(MPI_COMM_WORLD); { if (global_myrank == output_rank) { std::cout << "Rank " << global_myrank << "\t" << msg << std::endl; for (int i = 0; i < ntiles_y; i++) { for (int j = 0; j < ntiles_x; j++) { std::cout << nodeIds[i + j * ntiles_y]; } std::cout << std::endl; } } } MPI_Barrier(MPI_COMM_WORLD); } void Allocate2DPartitioned(int m, int n, int _num_tiles_x, int _num_tiles_y, int (*_pfn)(int, int, int, int, int)) { num_tiles_x = _num_tiles_x; num_tiles_y = _num_tiles_y; pfn = _pfn; int vx, vy; int roundup = 256; int ntiles_x = num_tiles_x; int ntiles_y = num_tiles_y; vx = ((((n + ntiles_x - 1) / ntiles_x) + roundup - 1) / roundup) * roundup; vy = ((((m + ntiles_y - 1) / ntiles_y) + roundup - 1) / roundup) * roundup; int* nodeIds = reinterpret_cast<int*>( _mm_malloc(num_tiles_x * num_tiles_y * sizeof(int), 64)); int* startx = reinterpret_cast<int*>(_mm_malloc((num_tiles_x + 1) * sizeof(int), 64)); int* starty = reinterpret_cast<int*>(_mm_malloc((num_tiles_y + 1) * sizeof(int), 64)); for (int i = 0; i < num_tiles_y; i++) { for (int j = 0; j < num_tiles_x; j++) { nodeIds[i + j * num_tiles_y] = pfn(j, i, num_tiles_x, num_tiles_y, global_nrank); } } for (int j = 0; j < num_tiles_x; j++) { startx[j] = std::min(vx * j, n); } for (int i = 0; i < num_tiles_y; i++) { starty[i] = std::min(vy * i, m); } startx[num_tiles_x] = n; starty[num_tiles_y] = m; set(m, n, num_tiles_x, num_tiles_y, nodeIds, startx, starty); } template<typename T> void setElement(const int idx, const int idy, T val) { assert(!empty); int ival, jval; int tile = getPartition(idy, idx, &ival, &jval); assert(tile != -1); if(nodeIds[ival + jval * ntiles_y] == global_myrank) { tiles[ival][jval].set(idx - start_idx[jval], idy - start_idy[ival], val); } } template<typename T> T getElement(const int idx, const int idy) const { assert(!empty); int ival, jval; int tile = getPartition(idy, idx, &ival, &jval); // Is this right? 
    assert(tile != -1);
    if (nodeIds[ival + jval * ntiles_y] == global_myrank) {
      return tiles[ival][jval].get(idx - start_idx[jval], idy - start_idy[ival]);
    }
    // The element lives on another rank; return a default value rather than
    // falling off the end of a non-void function (undefined behaviour).
    return T(0);
  }

  template <typename T>
  void get_edges(edgelist_t<T>* edgelist) const {
    // Get nnz
    int nnzs = 0;
    for (int i = 0; i < ntiles_y; i++) {
      for (int j = 0; j < ntiles_x; j++) {
        if (nodeIds[i + j * ntiles_y] == global_myrank) {
          nnzs += tiles[i][j].nnz;
        }
      }
    }
    edgelist->m = m;
    edgelist->n = n;
    edgelist->nnz = nnzs;
    if (nnzs > 0) {
      edgelist->edges = reinterpret_cast<edge_t<T>*>(
          _mm_malloc((uint64_t)nnzs * (uint64_t)sizeof(edge_t<T>), 64));
      nnzs = 0;
      for (int i = 0; i < ntiles_y; i++) {
        for (int j = 0; j < ntiles_x; j++) {
          if (nodeIds[i + j * ntiles_y] == global_myrank) {
            tiles[i][j].get_edges(edgelist->edges + nnzs, start_idy[i], start_idx[j]);
            nnzs += tiles[i][j].nnz;
          }
        }
      }
    }
  }

  uint64_t getNNZ() {
    uint64_t total_nnz = 0;
    for (int i = 0; i < ntiles_y; i++) {
      for (int j = 0; j < ntiles_x; j++) {
        if (nodeIds[i + j * ntiles_y] == global_myrank) {
          total_nnz += tiles[i][j].nnz;
        }
      }
    }
    // global reduction; total_nnz is 64-bit, so the matching MPI datatype is
    // MPI_UINT64_T (MPI_INT would only transfer the low 32 bits)
    MPI_Allreduce(MPI_IN_PLACE, &total_nnz, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD);
    return total_nnz;
  }
};

#endif  // SRC_SPMAT_H_
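// --- Sketch (not part of the header above): Allocate2DPartitioned leaves tile
// ownership to the user-supplied pfn(tile_x, tile_y, ntiles_x, ntiles_y,
// nranks) callback. round_robin below is a hypothetical example partitioner,
// not one shipped with this code; the loop prints the resulting owner map the
// same way print_tiles() does.
#include <cstdio>

static int round_robin(int tile_x, int tile_y, int ntx, int nty, int nranks)
{
    return (tile_y + tile_x * nty) % nranks;   // column-major round robin
}

int main()
{
    const int ntx = 4, nty = 4, nranks = 4;
    for (int i = 0; i < nty; i++) {
        for (int j = 0; j < ntx; j++)
            std::printf("%d", round_robin(j, i, ntx, nty, nranks));
        std::printf("\n");
    }
    return 0;
}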
TemporalRowConvolution.c
#ifndef TH_GENERIC_FILE #define TH_GENERIC_FILE "THNN/generic/TemporalRowConvolution.c" #else static inline void THNN_(TemporalRowConvolution_shapeCheck)( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *weight, THTensor *bias, int kW, int dW, int padW) { THArgCheck(kW > 0, 5, "kernel size should be greater than zero, but got kW: %d", kW); THArgCheck(dW > 0, 6, "stride should be greater than zero, but got dW: %d", dW); THNN_ARGCHECK(!weight->is_empty() && weight->dim() == 3, 3, weight, "non-empty 3D weight tensor expected, but got: %s"); THArgCheck(THTensor_(isContiguous)(weight), 4, "weight must be contiguous"); THArgCheck(!bias || THTensor_(isContiguous)(bias), 5, "bias must be contiguous"); if (bias != NULL) { THNN_CHECK_DIM_SIZE(bias, 1, 0, weight->size(0)); } // we're always looking at (possibly batch) x feats x seq int ndim = input->dim(); int dimF = 0; int dimS = 1; if (ndim == 3) { ++dimS; ++dimF; } THNN_ARGCHECK(!input->is_empty() && (ndim == 2 || ndim == 3), 1, input, "non-empty 2D or 3D (batch mode) input tensor expected, but got :%s"); int64_t inputFrameSize = THTensor_sizeLegacyNoScalars(weight, 0); int64_t nInputFrame = input->size(dimS); int64_t nOutputFrame = (nInputFrame + 2 * padW - kW) / dW + 1; if (nOutputFrame < 1) { THError("Given input size: (%d x %d). " "Calculated output size: (%d x %d). Output size is too small", inputFrameSize, nInputFrame, inputFrameSize, nOutputFrame); } THNN_CHECK_DIM_SIZE(input, ndim, dimF, inputFrameSize); if (gradOutput != NULL) { THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimF, inputFrameSize); THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimS, nOutputFrame); } } static void THNN_(unfolded_acc_row)( THTensor *finput, THTensor *input, int kW, int dW, int padW, int64_t inputFrameSize, int64_t nInputFrame, int64_t nOutputFrame) { int64_t c; scalar_t *input_data = input->data<scalar_t>(); scalar_t *finput_data = finput->data<scalar_t>(); // #pragma omp parallel for private(c) for (c = 0; c < inputFrameSize; c++) { int64_t kw, x; int64_t ix = 0; for (kw = 0; kw < kW; kw++) { scalar_t *src = finput_data + c * (kW * nOutputFrame) + kw * (nOutputFrame); scalar_t *dst = input_data + c * (nInputFrame); ix = (size_t)(kw); if (dW == 1) { scalar_t *dst_slice = dst + (size_t)(ix); THVector_(cadd)(dst_slice, dst_slice, src, 1, nOutputFrame); } else { for (x = 0; x < nOutputFrame; x++) { scalar_t *dst_slice = dst + (size_t)(ix + x * dW); THVector_(cadd)(dst_slice, dst_slice, src + (size_t)(x), 1, 1); } } } } } static void THNN_(unfolded_copy_row)( THTensor *finput, THTensor *input, int kW, int dW, int padW, int64_t inputFrameSize, int64_t nInputFrame, int64_t nOutputFrame) { int64_t k; scalar_t *input_data = input->data<scalar_t>(); scalar_t *finput_data = finput->data<scalar_t>(); // #pragma omp parallel for private(k) for (k = 0; k < inputFrameSize * kW; k++) { int64_t c = k / kW; int64_t rest = k % kW; int64_t kw = rest % kW; int64_t x; int64_t ix; scalar_t *dst = finput_data + c * (kW * nOutputFrame) + kw * (nOutputFrame); scalar_t *src = input_data + c * (nInputFrame); ix = (size_t)(kw); if (dW == 1) { memcpy(dst, src+(size_t)(ix), sizeof(scalar_t) * (nOutputFrame)); } else { for (x = 0; x < nOutputFrame; x++) { memcpy(dst + (size_t)(x), src + (size_t)(ix + x * dW), sizeof(scalar_t) * 1); } } } } static void THNN_(TemporalRowConvolution_updateOutput_frame)( THTensor *input, THTensor *output, THTensor *weight, THTensor *bias, THTensor *finput, int kW, int dW, int padW, int64_t inputFrameSize, int64_t nInputFrame, int64_t nOutputFrame) { int64_t 
i; THTensor *output3d = THTensor_(newWithStorage3d)( THTensor_getStoragePtr(output), output->storage_offset(), inputFrameSize, -1, 1, -1, nOutputFrame, -1); THNN_(unfolded_copy_row)(finput, input, kW, dW, padW, inputFrameSize, nInputFrame, nOutputFrame); THTensor_(zero)(output); if (bias != NULL) { for (i = 0; i < inputFrameSize; i++) THVector_(fill) (THStorage_(data)(THTensor_getStoragePtr(output)) + output->storage_offset() + output->stride(0) * i, THTensor_(get1d)(bias, i), nOutputFrame); } THTensor_(baddbmm)(output3d, 1, output3d, 1, weight, finput); c10::raw::intrusive_ptr::decref(output3d); } void THNN_(TemporalRowConvolution_updateOutput)( THNNState *state, THTensor *input, THTensor *output, THTensor *weight, THTensor *bias, THTensor *finput, THTensor *fgradInput, // unused here but needed for Cuda int kW, int dW, int padW, bool featFirst) { int ndim = input->dim(); THTensor *tinput = NULL; if (!featFirst) { tinput = THTensor_(newTranspose)(input, ndim - 1, ndim - 2); input = THTensor_(newContiguous)(tinput); } else { input = THTensor_(newContiguous)(input); } THNN_(TemporalRowConvolution_shapeCheck)( state, input, NULL, weight, bias, kW, dW, padW); int64_t inputFrameSize = THTensor_sizeLegacyNoScalars(weight, 0); int64_t nInputFrame = input->size(ndim - 1); int64_t nOutputFrame = (nInputFrame + 2 * padW - kW) / dW + 1; if (ndim == 2) { /* non-batch mode */ THTensor_(resize3d)(finput, inputFrameSize, kW, nOutputFrame); THTensor_(resize2d)(output, inputFrameSize, nOutputFrame); THTensor_(zero)(finput); THTensor_(zero)(output); THNN_(TemporalRowConvolution_updateOutput_frame) (input, output, weight, bias, finput, kW, dW, padW, inputFrameSize, nInputFrame, nOutputFrame); } else { int64_t T = input->size(0); int64_t t; THTensor_(resize4d)(finput, T, inputFrameSize, kW, nOutputFrame); THTensor_(resize3d)(output, T, inputFrameSize, nOutputFrame); THTensor_(zero)(finput); THTensor_(zero)(output); #pragma omp parallel for private(t) for (t = 0; t < T; t++) { THTensor *input_t = THTensor_(newSelect)(input, 0, t); THTensor *output_t = THTensor_(newSelect)(output, 0, t); THTensor *finput_t = THTensor_(newSelect)(finput, 0, t); THNN_(TemporalRowConvolution_updateOutput_frame) (input_t, output_t, weight, bias, finput_t, kW, dW, padW, inputFrameSize, nInputFrame, nOutputFrame); c10::raw::intrusive_ptr::decref(input_t); c10::raw::intrusive_ptr::decref(output_t); c10::raw::intrusive_ptr::decref(finput_t); } } if (!featFirst) { // NOTE: output will NOT be contiguous in this case THTensor_(transpose)(output, output, ndim - 1, ndim - 2); c10::raw::intrusive_ptr::decref(tinput); } c10::raw::intrusive_ptr::decref(input); } static void THNN_(TemporalRowConvolution_updateGradInput_frame)( THTensor *gradInput, THTensor *gradOutput, THTensor *weight, THTensor *fgradInput, int kW, int dW, int padW, int64_t inputFrameSize, int64_t nInputFrame, int64_t nOutputFrame) { THTensor *gradOutput3d = THTensor_(newWithStorage3d)( THTensor_getStoragePtr(gradOutput), gradOutput->storage_offset(), inputFrameSize, -1, 1, -1, nOutputFrame, -1); // weight: inputFrameSize x kW x 1 // gradOutput3d: inputFrameSize x 1 x nOutputFrame THTensor_(baddbmm)(fgradInput, 0, fgradInput, 1, weight, gradOutput3d); // fgradInput: inputFrameSize x kW x nOutputFrame c10::raw::intrusive_ptr::decref(gradOutput3d); THTensor_(zero)(gradInput); THNN_(unfolded_acc_row)(fgradInput, gradInput, kW, dW, padW, inputFrameSize, nInputFrame, nOutputFrame); } void THNN_(TemporalRowConvolution_updateGradInput)( THNNState *state, THTensor *input, THTensor 
*gradOutput, THTensor *gradInput, THTensor *weight, THTensor *finput, THTensor *fgradInput, int kW, int dW, int padW, bool featFirst) { int ndim = input->dim(); THTensor *tinput, *tgradOutput; if (!featFirst) { tinput = THTensor_(newTranspose)(input, ndim - 1, ndim - 2); tgradOutput = THTensor_(newTranspose)(gradOutput, ndim - 1, ndim - 2); input = THTensor_(newContiguous)(tinput); gradOutput = THTensor_(newContiguous)(tgradOutput); } else { input = THTensor_(newContiguous)(input); gradOutput = THTensor_(newContiguous)(gradOutput); } THNN_(TemporalRowConvolution_shapeCheck)(state, input, gradOutput, weight, NULL, kW, dW, padW); int64_t inputFrameSize = THTensor_sizeLegacyNoScalars(weight, 0); int64_t nInputFrame = input->size(ndim - 1); int64_t nOutputFrame = (nInputFrame + 2 * padW - kW) / dW + 1; THTensor_(resizeAs)(fgradInput, finput); THTensor_(resizeAs)(gradInput, input); THTensor_(zero)(fgradInput); THTensor_(zero)(gradInput); THTensor *tweight = THTensor_(new)(); THTensor_(transpose)(tweight, weight, 1, 2); if (ndim == 2) { THNN_(TemporalRowConvolution_updateGradInput_frame) (gradInput, gradOutput, tweight, fgradInput, kW, dW, padW, inputFrameSize, nInputFrame, nOutputFrame); } else { int64_t T = input->size(0); int64_t t; #pragma omp parallel for private(t) for (t = 0; t < T; t++) { THTensor *gradInput_t = THTensor_(newSelect)(gradInput, 0, t); THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t); THTensor *fgradInput_t = THTensor_(newSelect)(fgradInput, 0, t); THNN_(TemporalRowConvolution_updateGradInput_frame) (gradInput_t, gradOutput_t, tweight, fgradInput_t, kW, dW, padW, inputFrameSize, nInputFrame, nOutputFrame); c10::raw::intrusive_ptr::decref(gradInput_t); c10::raw::intrusive_ptr::decref(gradOutput_t); c10::raw::intrusive_ptr::decref(fgradInput_t); } } c10::raw::intrusive_ptr::decref(tweight); if (!featFirst) { // NOTE: gradInput will NOT be contiguous in this case c10::raw::intrusive_ptr::decref(tinput); c10::raw::intrusive_ptr::decref(tgradOutput); THTensor_(transpose)(gradInput, gradInput, ndim - 1, ndim - 2); } c10::raw::intrusive_ptr::decref(input); c10::raw::intrusive_ptr::decref(gradOutput); } static void THNN_(TemporalRowConvolution_accGradParameters_frame)( THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias, THTensor *finput, scalar_t scale) { int64_t i; THTensor *gradOutput3d = THTensor_(newWithStorage3d)( THTensor_getStoragePtr(gradOutput), gradOutput->storage_offset(), gradOutput->size(0), -1, 1, -1, gradOutput->size(1), -1); THTensor *tfinput = THTensor_(new)(); THTensor_(transpose)(tfinput, finput, 1, 2); // gradOutput3d: inputFrameSize x 1 x nOutputFrame // finput: inputFrameSize x nOutputFrame x kW THTensor_(baddbmm)(gradWeight, 1, gradWeight, scale, gradOutput3d, tfinput); // gradWeight: inputFrameSize x 1 x kW c10::raw::intrusive_ptr::decref(tfinput); if (gradBias != NULL) { for (i = 0; i < THTensor_sizeLegacyNoScalars(gradBias, 0); i++) { int64_t k; scalar_t sum = 0; scalar_t *data = THStorage_(data)(THTensor_getStoragePtr(gradOutput3d)) + gradOutput3d->storage_offset() + i * gradOutput3d->stride(0); for (k = 0; k < gradOutput3d->size(2); k++) { sum += data[k]; } (THStorage_(data)(THTensor_getStoragePtr(gradBias)) + gradBias->storage_offset())[i] += scale * sum; } } c10::raw::intrusive_ptr::decref(gradOutput3d); } void THNN_(TemporalRowConvolution_accGradParameters)( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias, THTensor *finput, THTensor *fgradInput, int kW, int dW, int padW, 
bool featFirst, accreal scale_) { scalar_t scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); int ndim = input->dim(); THTensor *tinput = NULL; THTensor *tgradOutput = NULL; if (!featFirst) { tinput = THTensor_(newTranspose)(input, ndim - 1, ndim - 2); tgradOutput = THTensor_(newTranspose)(gradOutput, ndim - 1, ndim - 2); input = THTensor_(newContiguous)(tinput); gradOutput = THTensor_(newContiguous)(tgradOutput); } else { input = THTensor_(newContiguous)(input); gradOutput = THTensor_(newContiguous)(gradOutput); } THNN_(TemporalRowConvolution_shapeCheck) (state, input, gradOutput, gradWeight, gradBias, kW, dW, padW); if (ndim == 2) { THNN_(TemporalRowConvolution_accGradParameters_frame)( gradOutput, gradWeight, gradBias, finput, scale); } else { int64_t T = input->size(0); int64_t t; for (t = 0; t < T; t++) { THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t); THTensor *finput_t = THTensor_(newSelect)(finput, 0, t); THNN_(TemporalRowConvolution_accGradParameters_frame)( gradOutput_t, gradWeight, gradBias, finput_t, scale); c10::raw::intrusive_ptr::decref(gradOutput_t); c10::raw::intrusive_ptr::decref(finput_t); } } if (!featFirst) { c10::raw::intrusive_ptr::decref(tinput); c10::raw::intrusive_ptr::decref(tgradOutput); } c10::raw::intrusive_ptr::decref(input); c10::raw::intrusive_ptr::decref(gradOutput); } #endif
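// --- Sketch (not part of the file above): shapeCheck and both update paths
// derive the temporal output length with the standard convolution arithmetic;
// the parameters in main() are made up for illustration.
#include <cstdio>

static long out_frames(long nInputFrame, int kW, int dW, int padW)
{
    // nOutputFrame = (nInputFrame + 2*padW - kW) / dW + 1, as in shapeCheck
    return (nInputFrame + 2L * padW - kW) / dW + 1;
}

int main()
{
    std::printf("%ld\n", out_frames(100, 5, 2, 2));   // (100+4-5)/2+1 = 50
    return 0;
}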
spmv_csr_mat.c
/*
    This file is part of HiParTI!.

    HiParTI! is free software: you can redistribute it and/or modify
    it under the terms of the GNU Lesser General Public License as
    published by the Free Software Foundation, either version 3 of
    the License, or (at your option) any later version.

    HiParTI! is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU Lesser General Public
    License along with HiParTI!.
    If not, see <http://www.gnu.org/licenses/>.
*/

#include <stdio.h>
#include <stdlib.h>
#include <getopt.h>
#include <HiParTI.h>

int main(int argc, char * const argv[]) {
    FILE *fi = NULL, *fo = NULL;
    ptiSparseMatrix mtx;
    ptiSparseMatrixCSR csrmtx;
    ptiValueVector x, y;
    int niters = 50;
    ptiTimer timer;
    ptiNewTimer(&timer, 0);

    /* OpenMP */
    int cuda_dev_id = -2;
    int nthreads = 1;   // get from OMP_NUM_THREADS environment

    static struct option long_options[] = {
        {"input", required_argument, 0, 'i'},
        {"output", optional_argument, 0, 'o'},
        {"cuda-dev-id", optional_argument, 0, 'd'},
        {0, 0, 0, 0}
    };

    for(;;) {
        int option_index = 0;
        int c = getopt_long(argc, argv, "i:o:d:", long_options, &option_index);
        if(c == -1) {
            break;
        }
        switch(c) {
        case 'i':
            fi = fopen(optarg, "r");
            ptiAssert(fi != NULL);
            break;
        case 'o':
            fo = fopen(optarg, "w");
            ptiAssert(fo != NULL);
            break;
        case 'd':
            sscanf(optarg, "%d", &cuda_dev_id);
            break;
        default:
            abort();
        }
    }
    printf("niters: %d\n", niters);
    printf("cuda_dev_id: %d\n", cuda_dev_id);
    if(cuda_dev_id == -1) {
#ifdef HIPARTI_USE_OPENMP
        #pragma omp parallel
        nthreads = omp_get_num_threads();
#endif
        printf("nthreads: %d\n", nthreads);
    }

    if(optind > argc || argc < 3) {
        printf("Usage: %s\n", argv[0]);
        printf("Options: -i INPUT, --input=INPUT\n");
        printf("         -o OUTPUT, --output=OUTPUT\n");
        printf("         -d CUDA_DEV_ID, --cuda-dev-id=DEV_ID\n");
        printf("\n");
        return 1;
    }

    /// Load sparse matrix in COO format
    ptiAssert(ptiLoadSparseMatrix(&mtx, 1, fi) == 0);
    fclose(fi);
    ptiRandomValueVector(&(mtx.values));    // to better compare results
    ptiSparseMatrixStatus(&mtx, stdout);
    // ptiAssert(ptiDumpSparseMatrix(&mtx, 0, stdout) == 0);

    /// Convert sparse matrix to CSR format
    ptiAssert(ptiSparseMatrixToCSR(&csrmtx, &mtx) == 0);
    ptiFreeSparseMatrix(&mtx);
    ptiSparseMatrixStatusCSR(&csrmtx, stdout);
    // ptiAssert(ptiDumpSparseMatrixCSR(&csrmtx, stdout) == 0);

    /// Initialize values for vectors x and y
    ptiNewValueVector(&x, csrmtx.ncols, csrmtx.ncols);
    ptiRandomValueVector(&x);
    ptiNewValueVector(&y, csrmtx.nrows, csrmtx.nrows);
    // ptiAssert(ptiDumpValueVector(&x, stdout) == 0);
    // ptiAssert(ptiDumpValueVector(&y, stdout) == 0);

#ifdef HIPARTI_USE_OPENMP
    #pragma omp parallel
    nthreads = omp_get_num_threads();
    printf("nthreads: %d\n", nthreads);
#endif

    // Warm-up
    if(cuda_dev_id == -2) {
        printf("Run ptiSparseMatrixMulVectorCSR:\n");
        ptiSparseMatrixMulVectorCSR(&y, &csrmtx, &x);
    } else if(cuda_dev_id == -1) {
        printf("Run ptiOmpSparseMatrixMulVectorCSR:\n");
        ptiOmpSparseMatrixMulVectorCSR(&y, &csrmtx, &x);
    }

    ptiStartTimer(timer);
    for(int i = 0; i < niters; ++i) {
        if(cuda_dev_id == -2) {
            ptiSparseMatrixMulVectorCSR(&y, &csrmtx, &x);
        } else if(cuda_dev_id == -1) {
            ptiOmpSparseMatrixMulVectorCSR(&y, &csrmtx, &x);
        }
    }
    ptiStopTimer(timer);
    printf("\n");
    double elapsed_time = ptiPrintAverageElapsedTime(timer, niters, "CSR-SpMV");
    ptiNnzIndex flops = 2 * csrmtx.nnz;
    ptiPrintGFLOPS(elapsed_time, flops, "CSR-SpMV");

    if(fo != NULL) {
        ptiAssert(ptiDumpValueVector(&y, fo) == 0);
        fclose(fo);
    }

    ptiFreeSparseMatrixCSR(&csrmtx);
    ptiFreeValueVector(&x);
    ptiFreeValueVector(&y);
    ptiFreeTimer(timer);

    return 0;
}
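// --- Sketch (not part of the driver above): ptiSparseMatrixMulVectorCSR and
// ptiOmpSparseMatrixMulVectorCSR are HiParTI internals; the kernel they time
// is, in essence, the textbook CSR SpMV below (a generic version under that
// assumption, not HiParTI's implementation).
#include <cstdio>
#include <vector>

static void spmv_csr(int nrows, const std::vector<int>& rowptr,
                     const std::vector<int>& colind,
                     const std::vector<double>& val,
                     const std::vector<double>& x, std::vector<double>& y)
{
    // one independent dot product per row; rowptr has nrows + 1 entries
    #pragma omp parallel for
    for (int r = 0; r < nrows; r++) {
        double sum = 0.0;
        for (int k = rowptr[r]; k < rowptr[r + 1]; k++)
            sum += val[k] * x[colind[k]];   // gather along the row
        y[r] = sum;
    }
}

int main()
{
    // 2x2 matrix [[1 2],[0 3]] in CSR form, multiplied by x = (1, 1)
    std::vector<int> rowptr{0, 2, 3}, colind{0, 1, 1};
    std::vector<double> val{1, 2, 3}, x{1, 1}, y(2);
    spmv_csr(2, rowptr, colind, val, x, y);
    std::printf("%g %g\n", y[0], y[1]);   // prints: 3 3
    return 0;
}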
convolution_winograd_transform_pack4_bf16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

static void conv3x3s1_winograd63_transform_input_pack4_bf16s_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    const int w_tiles = (w - 2) / 6;
    const int h_tiles = (h - 2) / 6;
    const int tiles = w_tiles * h_tiles;

    // const float itm[8][8] = {
    //     {1.0f,  0.0f, -5.25f,  0.00f,  5.25f,  0.00f, -1.0f, 0.0f},
    //
    //     {0.0f,  1.0f,  1.00f, -4.25f, -4.25f,  1.00f,  1.0f, 0.0f},
    //     {0.0f, -1.0f,  1.00f,  4.25f, -4.25f, -1.00f,  1.0f, 0.0f},
    //
    //     {0.0f,  0.5f,  0.25f, -2.50f, -1.25f,  2.00f,  1.0f, 0.0f},
    //     {0.0f, -0.5f,  0.25f,  2.50f, -1.25f, -2.00f,  1.0f, 0.0f},
    //
    //     {0.0f,  2.0f,  4.00f, -2.50f, -5.00f,  0.50f,  1.0f, 0.0f},
    //     {0.0f, -2.0f,  4.00f,  2.50f, -5.00f, -0.50f,  1.0f, 0.0f},
    //
    //     {0.0f, -1.0f,  0.00f,  5.25f,  0.00f, -5.25f,  0.0f, 1.0f}
    // };

    // 0 = r00 - r06 + (r04 - r02) * 5.25
    // 7 = r07 - r01 + (r03 - r05) * 5.25

    // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
    // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)

    // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
    // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)

    // reuse r04 * 1.25
    // reuse r03 * 2.5
    // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
    // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

        float tmp[8][8][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const unsigned short* r0 = img0.row<const unsigned short>(i * 6) + (j * 6) * 4;

                for (int m = 0; m < 8; m++)
                {
                    float32x4_t _r00 = vcvt_f32_bf16(vld1_u16(r0));
                    float32x4_t _r01 = vcvt_f32_bf16(vld1_u16(r0 + 4));
                    float32x4_t _r02 = vcvt_f32_bf16(vld1_u16(r0 + 8));
                    float32x4_t _r03 = vcvt_f32_bf16(vld1_u16(r0 + 12));
                    float32x4_t _r04 = vcvt_f32_bf16(vld1_u16(r0 + 16));
                    float32x4_t _r05 = vcvt_f32_bf16(vld1_u16(r0 + 20));
                    float32x4_t _r06 = vcvt_f32_bf16(vld1_u16(r0 + 24));
                    float32x4_t _r07 = vcvt_f32_bf16(vld1_u16(r0 + 28));

                    float32x4_t _tmp0m = vmlaq_n_f32(vsubq_f32(_r00, _r06), vsubq_f32(_r04, _r02), 5.25f);
                    float32x4_t _tmp7m = vmlaq_n_f32(vsubq_f32(_r07, _r01), vsubq_f32(_r03, _r05), 5.25f);

                    float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_r02, _r06), _r04, 4.25f);
                    float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_r01, _r05), _r03, 4.25f);

                    float32x4_t _tmp1m = vaddq_f32(_tmp12a, _tmp12b);
                    float32x4_t _tmp2m = vsubq_f32(_tmp12a, _tmp12b);

                    float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_r06, _r02, 0.25f), _r04, 1.25f);
                    float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 0.5f), _r03, 2.5f), _r05, 2.f);

                    float32x4_t _tmp3m = vaddq_f32(_tmp34a, _tmp34b);
                    float32x4_t _tmp4m = vsubq_f32(_tmp34a, _tmp34b);

                    float32x4_t _tmp56a = vmlaq_n_f32(_r06, vmlsq_n_f32(_r02, _r04, 1.25f), 4.f);
                    float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 2.f), _r03, 2.5f), _r05, 0.5f);

                    float32x4_t _tmp5m = vaddq_f32(_tmp56a, _tmp56b);
                    float32x4_t _tmp6m = vsubq_f32(_tmp56a, _tmp56b);

                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[3][m], _tmp3m);
                    vst1q_f32(tmp[4][m], _tmp4m);
                    vst1q_f32(tmp[5][m], _tmp5m);
                    vst1q_f32(tmp[6][m], _tmp6m);
                    vst1q_f32(tmp[7][m], _tmp7m);

                    r0 += w * 4;
                }

                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4;
                float* r0_tm_1 = r0_tm_0 + tiles * 4;
                float* r0_tm_2 = r0_tm_0 + tiles * 8;
                float* r0_tm_3 = r0_tm_0 + tiles * 12;
                float* r0_tm_4 = r0_tm_0 + tiles * 16;
                float* r0_tm_5 = r0_tm_0 + tiles * 20;
                float* r0_tm_6 = r0_tm_0 + tiles * 24;
                float* r0_tm_7 = r0_tm_0 + tiles * 28;

                for (int m = 0; m < 8; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
                    float32x4_t _tmp06 = vld1q_f32(tmp[m][6]);
                    float32x4_t _tmp07 = vld1q_f32(tmp[m][7]);

                    float32x4_t _r0tm0 = vmlaq_n_f32(vsubq_f32(_tmp00, _tmp06), vsubq_f32(_tmp04, _tmp02), 5.25f);
                    float32x4_t _r0tm7 = vmlaq_n_f32(vsubq_f32(_tmp07, _tmp01), vsubq_f32(_tmp03, _tmp05), 5.25f);

                    float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_tmp02, _tmp06), _tmp04, 4.25f);
                    float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_tmp01, _tmp05), _tmp03, 4.25f);

                    float32x4_t _r0tm1 = vaddq_f32(_tmp12a, _tmp12b);
                    float32x4_t _r0tm2 = vsubq_f32(_tmp12a, _tmp12b);

                    float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f);
                    float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f);

                    float32x4_t _r0tm3 = vaddq_f32(_tmp34a, _tmp34b);
                    float32x4_t _r0tm4 = vsubq_f32(_tmp34a, _tmp34b);

                    float32x4_t _tmp56a = vmlaq_n_f32(_tmp06, vmlsq_n_f32(_tmp02, _tmp04, 1.25f), 4.f);
                    float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f);

                    float32x4_t _r0tm5 = vaddq_f32(_tmp56a, _tmp56b);
                    float32x4_t _r0tm6 = vsubq_f32(_tmp56a, _tmp56b);

                    vst1q_f32(r0_tm_0, _r0tm0);
                    vst1q_f32(r0_tm_1, _r0tm1);
                    vst1q_f32(r0_tm_2, _r0tm2);
                    vst1q_f32(r0_tm_3, _r0tm3);
                    vst1q_f32(r0_tm_4, _r0tm4);
                    vst1q_f32(r0_tm_5, _r0tm5);
                    vst1q_f32(r0_tm_6, _r0tm6);
                    vst1q_f32(r0_tm_7, _r0tm7);

                    r0_tm_0 += tiles * 32;
                    r0_tm_1 += tiles * 32;
                    r0_tm_2 += tiles * 32;
                    r0_tm_3 += tiles * 32;
                    r0_tm_4 += tiles * 32;
                    r0_tm_5 += tiles * 32;
                    r0_tm_6 += tiles * 32;
                    r0_tm_7 += tiles * 32;
                }
            }
        }
    }
}
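// Annotation (not part of the original ncnn source): the vcvt_f32_bf16 /
// vcvt_bf16_f32 helpers used above rely on bfloat16 being the upper 16 bits
// of an IEEE-754 float32. A minimal scalar sketch of that convention,
// assuming truncating (round-toward-zero) narrowing:
//
//     #include <cstdint>
//     #include <cstring>
//
//     static inline float bf16_to_f32(uint16_t v)
//     {
//         uint32_t bits = (uint32_t)v << 16; // bf16 payload is the high half
//         float f;
//         std::memcpy(&f, &bits, sizeof(f));
//         return f;
//     }
//
//     static inline uint16_t f32_to_bf16(float f)
//     {
//         uint32_t bits;
//         std::memcpy(&bits, &f, sizeof(bits));
//         return (uint16_t)(bits >> 16); // truncation; a rounding variant is also possible
//     }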
static void conv3x3s1_winograd63_transform_output_pack4_bf16s_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int w_tiles = outw / 6;
    const int h_tiles = outh / 6;
    const int tiles = w_tiles * h_tiles;

    const float* biasptr = bias;

    // const float otm[6][8] = {
    //     {1.0f, 1.0f,  1.0f,  1.0f,  1.0f, 32.0f, 32.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f,  2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
    //     {0.0f, 1.0f,  1.0f,  4.0f,  4.0f,  8.0f,  8.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f,  8.0f, -8.0f,  4.0f, -4.0f, 0.0f},
    //     {0.0f, 1.0f,  1.0f, 16.0f, 16.0f,  2.0f,  2.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
    // };

    // 0 = r0 + (r1 + r2) + (r3 + r4)     + (r5 + r6) * 32
    // 1 =      (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
    // 2 =      (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
    // 3 =      (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
    // 4 =      (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
    // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        float32x4_t _bias0 = biasptr ? vld1q_f32(biasptr + p * 4) : vdupq_n_f32(0.f);

        float tmp[6][8][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4;
                const float* output0_tm_1 = output0_tm_0 + tiles * 4;
                const float* output0_tm_2 = output0_tm_0 + tiles * 8;
                const float* output0_tm_3 = output0_tm_0 + tiles * 12;
                const float* output0_tm_4 = output0_tm_0 + tiles * 16;
                const float* output0_tm_5 = output0_tm_0 + tiles * 20;
                const float* output0_tm_6 = output0_tm_0 + tiles * 24;
                const float* output0_tm_7 = output0_tm_0 + tiles * 28;

                unsigned short* output0 = out0.row<unsigned short>(i * 6) + (j * 6) * 4;

                for (int m = 0; m < 8; m++)
                {
                    float32x4_t _out0tm0 = vld1q_f32(output0_tm_0);
                    float32x4_t _out0tm1 = vld1q_f32(output0_tm_1);
                    float32x4_t _out0tm2 = vld1q_f32(output0_tm_2);
                    float32x4_t _out0tm3 = vld1q_f32(output0_tm_3);
                    float32x4_t _out0tm4 = vld1q_f32(output0_tm_4);
                    float32x4_t _out0tm5 = vld1q_f32(output0_tm_5);
                    float32x4_t _out0tm6 = vld1q_f32(output0_tm_6);
                    float32x4_t _out0tm7 = vld1q_f32(output0_tm_7);

                    float32x4_t _tmp024a = vaddq_f32(_out0tm1, _out0tm2);
                    float32x4_t _tmp135a = vsubq_f32(_out0tm1, _out0tm2);
                    float32x4_t _tmp024b = vaddq_f32(_out0tm3, _out0tm4);
                    float32x4_t _tmp135b = vsubq_f32(_out0tm3, _out0tm4);
                    float32x4_t _tmp024c = vaddq_f32(_out0tm5, _out0tm6);
                    float32x4_t _tmp135c = vsubq_f32(_out0tm5, _out0tm6);

                    float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f));
                    float32x4_t _tmp2m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f);
                    float32x4_t _tmp4m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f);

                    float32x4_t _tmp1m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f);
                    float32x4_t _tmp3m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f);
                    float32x4_t _tmp5m = vaddq_f32(vaddq_f32(_out0tm7, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f));

                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[3][m], _tmp3m);
                    vst1q_f32(tmp[4][m], _tmp4m);
                    vst1q_f32(tmp[5][m], _tmp5m);

                    output0_tm_0 += tiles * 32;
                    output0_tm_1 += tiles * 32;
                    output0_tm_2 += tiles * 32;
                    output0_tm_3 += tiles * 32;
                    output0_tm_4 += tiles * 32;
                    output0_tm_5 += tiles * 32;
                    output0_tm_6 += tiles * 32;
                    output0_tm_7 += tiles * 32;
                }

                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
                    float32x4_t _tmp06 = vld1q_f32(tmp[m][6]);
                    float32x4_t _tmp07 = vld1q_f32(tmp[m][7]);

                    float32x4_t _tmp024a = vaddq_f32(_tmp01, _tmp02);
                    float32x4_t _tmp135a = vsubq_f32(_tmp01, _tmp02);
                    float32x4_t _tmp024b = vaddq_f32(_tmp03, _tmp04);
                    float32x4_t _tmp135b = vsubq_f32(_tmp03, _tmp04);
                    float32x4_t _tmp024c = vaddq_f32(_tmp05, _tmp06);
                    float32x4_t _tmp135c = vsubq_f32(_tmp05, _tmp06);

                    float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f)));
                    float32x4_t _out02 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f));
                    float32x4_t _out04 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f));

                    float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f));
                    float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f));
                    float32x4_t _out05 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp07, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f)));

                    vst1_u16(output0, vcvt_bf16_f32(_out00));
                    vst1_u16(output0 + 4, vcvt_bf16_f32(_out01));
                    vst1_u16(output0 + 8, vcvt_bf16_f32(_out02));
                    vst1_u16(output0 + 12, vcvt_bf16_f32(_out03));
                    vst1_u16(output0 + 16, vcvt_bf16_f32(_out04));
                    vst1_u16(output0 + 20, vcvt_bf16_f32(_out05));

                    output0 += outw * 4;
                }
            }
        }
    }
}
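// Annotation (illustration only, not part of the original ncnn source): the
// output transform above applies this 1-D F(6,3) reduction once along rows
// and once along columns of each 8x8 transformed tile; the SIMD code fuses
// the (r1 +/- r2), (r3 +/- r4), (r5 +/- r6) pairs exactly as written here.
// Bias addition happens in the second pass and is omitted in this sketch.
static inline void winograd63_output_transform_1d_ref(const float r[8], float out[6])
{
    const float t024a = r[1] + r[2], t135a = r[1] - r[2];
    const float t024b = r[3] + r[4], t135b = r[3] - r[4];
    const float t024c = r[5] + r[6], t135c = r[5] - r[6];
    out[0] = r[0] + t024a + t024b + t024c * 32.f;
    out[1] = t135a + t135b * 2.f + t135c * 16.f;
    out[2] = t024a + t024b * 4.f + t024c * 8.f;
    out[3] = t135a + t135b * 8.f + t135c * 4.f;
    out[4] = t024a + t024b * 16.f + t024c * 2.f;
    out[5] = r[7] + t135a + t135b * 32.f + t135c;
}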
static void conv3x3s1_winograd43_transform_input_pack4_bf16s_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    const int w_tiles = (w - 2) / 4;
    const int h_tiles = (h - 2) / 4;
    const int tiles = w_tiles * h_tiles;

    // const float itm[6][6] = {
    //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
    //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
    //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
    //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f,  0.0f,-5.0f, 0.0f, 1.0f}
    // };

    // 0 =  4 * r00 - 5 * r02 + r04
    // 1 = -4 * (r01 + r02) + r04 + r03
    // 2 =  4 * (r01 - r02) + r04 - r03
    // 3 = -2 * (r01 - r03) + r04 - r02
    // 4 =  2 * (r01 - r03) + r04 - r02
    // 5 =  4 * r01 - 5 * r03 + r05

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

        float tmp[6][6][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const unsigned short* r0 = img0.row<const unsigned short>(i * 4) + (j * 4) * 4;

                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _r00 = vcvt_f32_bf16(vld1_u16(r0));
                    float32x4_t _r01 = vcvt_f32_bf16(vld1_u16(r0 + 4));
                    float32x4_t _r02 = vcvt_f32_bf16(vld1_u16(r0 + 8));
                    float32x4_t _r03 = vcvt_f32_bf16(vld1_u16(r0 + 12));
                    float32x4_t _r04 = vcvt_f32_bf16(vld1_u16(r0 + 16));
                    float32x4_t _r05 = vcvt_f32_bf16(vld1_u16(r0 + 20));

                    float32x4_t _tmp0m = vmlsq_n_f32(vmlaq_n_f32(_r04, _r00, 4.f), _r02, 5.f);
                    float32x4_t _tmp1m = vmlsq_n_f32(vaddq_f32(_r04, _r03), vaddq_f32(_r01, _r02), 4.f);
                    float32x4_t _tmp2m = vmlaq_n_f32(vsubq_f32(_r04, _r03), vsubq_f32(_r01, _r02), 4.f);
                    float32x4_t _tmp3m = vmlsq_n_f32(vsubq_f32(_r04, _r02), vsubq_f32(_r01, _r03), 2.f);
                    float32x4_t _tmp4m = vmlaq_n_f32(vsubq_f32(_r04, _r02), vsubq_f32(_r01, _r03), 2.f);
                    float32x4_t _tmp5m = vmlsq_n_f32(vmlaq_n_f32(_r05, _r01, 4.f), _r03, 5.f);

                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[3][m], _tmp3m);
                    vst1q_f32(tmp[4][m], _tmp4m);
                    vst1q_f32(tmp[5][m], _tmp5m);

                    r0 += w * 4;
                }

                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4;
                float* r0_tm_1 = r0_tm_0 + tiles * 4;
                float* r0_tm_2 = r0_tm_0 + tiles * 8;
                float* r0_tm_3 = r0_tm_0 + tiles * 12;
                float* r0_tm_4 = r0_tm_0 + tiles * 16;
                float* r0_tm_5 = r0_tm_0 + tiles * 20;

                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);

                    float32x4_t _r0tm0 = vmlsq_n_f32(vmlaq_n_f32(_tmp04, _tmp00, 4.f), _tmp02, 5.f);
                    float32x4_t _r0tm1 = vmlsq_n_f32(vaddq_f32(_tmp04, _tmp03), vaddq_f32(_tmp01, _tmp02), 4.f);
                    float32x4_t _r0tm2 = vmlaq_n_f32(vsubq_f32(_tmp04, _tmp03), vsubq_f32(_tmp01, _tmp02), 4.f);
                    float32x4_t _r0tm3 = vmlsq_n_f32(vsubq_f32(_tmp04, _tmp02), vsubq_f32(_tmp01, _tmp03), 2.f);
                    float32x4_t _r0tm4 = vmlaq_n_f32(vsubq_f32(_tmp04, _tmp02), vsubq_f32(_tmp01, _tmp03), 2.f);
                    float32x4_t _r0tm5 = vmlsq_n_f32(vmlaq_n_f32(_tmp05, _tmp01, 4.f), _tmp03, 5.f);

                    vst1q_f32(r0_tm_0, _r0tm0);
                    vst1q_f32(r0_tm_1, _r0tm1);
                    vst1q_f32(r0_tm_2, _r0tm2);
                    vst1q_f32(r0_tm_3, _r0tm3);
                    vst1q_f32(r0_tm_4, _r0tm4);
                    vst1q_f32(r0_tm_5, _r0tm5);

                    r0_tm_0 += tiles * 24;
                    r0_tm_1 += tiles * 24;
                    r0_tm_2 += tiles * 24;
                    r0_tm_3 += tiles * 24;
                    r0_tm_4 += tiles * 24;
                    r0_tm_5 += tiles * 24;
                }
            }
        }
    }
}

static void conv3x3s1_winograd43_transform_output_pack4_bf16s_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int w_tiles = outw / 4;
    const int h_tiles = outh / 4;
    const int tiles = w_tiles * h_tiles;

    const float* biasptr = bias;

    // const float otm[4][6] = {
    //     {1.0f, 1.0f,  1.0f, 1.0f,  1.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
    //     {0.0f, 1.0f,  1.0f, 4.0f,  4.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
    // };

    // 0 = r00 + (r01 + r02) + (r03 + r04)
    // 1 =       (r01 - r02) + (r03 - r04) * 2
    // 2 =       (r01 + r02) + (r03 + r04) * 4
    // 3 = r05 + (r01 - r02) + (r03 - r04) * 8

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        float32x4_t _bias0 = biasptr ? vld1q_f32(biasptr + p * 4) : vdupq_n_f32(0.f);

        float tmp[4][6][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4;
                const float* output0_tm_1 = output0_tm_0 + tiles * 4;
                const float* output0_tm_2 = output0_tm_0 + tiles * 8;
                const float* output0_tm_3 = output0_tm_0 + tiles * 12;
                const float* output0_tm_4 = output0_tm_0 + tiles * 16;
                const float* output0_tm_5 = output0_tm_0 + tiles * 20;

                unsigned short* output0 = out0.row<unsigned short>(i * 4) + (j * 4) * 4;

                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _out0tm0 = vld1q_f32(output0_tm_0);
                    float32x4_t _out0tm1 = vld1q_f32(output0_tm_1);
                    float32x4_t _out0tm2 = vld1q_f32(output0_tm_2);
                    float32x4_t _out0tm3 = vld1q_f32(output0_tm_3);
                    float32x4_t _out0tm4 = vld1q_f32(output0_tm_4);
                    float32x4_t _out0tm5 = vld1q_f32(output0_tm_5);

                    float32x4_t _tmp02a = vaddq_f32(_out0tm1, _out0tm2);
                    float32x4_t _tmp13a = vsubq_f32(_out0tm1, _out0tm2);
                    float32x4_t _tmp02b = vaddq_f32(_out0tm3, _out0tm4);
                    float32x4_t _tmp13b = vsubq_f32(_out0tm3, _out0tm4);

                    float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp02a), _tmp02b);
                    float32x4_t _tmp1m = vmlaq_n_f32(_tmp13a, _tmp13b, 2.f);
                    float32x4_t _tmp2m = vmlaq_n_f32(_tmp02a, _tmp02b, 4.f);
                    float32x4_t _tmp3m = vmlaq_n_f32(vaddq_f32(_out0tm5, _tmp13a), _tmp13b, 8.f);

                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[3][m], _tmp3m);

                    output0_tm_0 += tiles * 24;
                    output0_tm_1 += tiles * 24;
                    output0_tm_2 += tiles * 24;
                    output0_tm_3 += tiles * 24;
                    output0_tm_4 += tiles * 24;
                    output0_tm_5 += tiles * 24;
                }

                for (int m = 0; m < 4; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);

                    float32x4_t _tmp02a = vaddq_f32(_tmp01, _tmp02);
                    float32x4_t _tmp13a = vsubq_f32(_tmp01, _tmp02);
                    float32x4_t _tmp02b = vaddq_f32(_tmp03, _tmp04);
                    float32x4_t _tmp13b = vsubq_f32(_tmp03, _tmp04);

                    float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp02a), _tmp02b));
                    float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(_tmp13a, _tmp13b, 2.f));
                    float32x4_t _out02 = vaddq_f32(_bias0, vmlaq_n_f32(_tmp02a, _tmp02b, 4.f));
                    float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vaddq_f32(_tmp05, _tmp13a), _tmp13b, 8.f));

                    vst1_u16(output0, vcvt_bf16_f32(_out00));
                    vst1_u16(output0 + 4, vcvt_bf16_f32(_out01));
                    vst1_u16(output0 + 8, vcvt_bf16_f32(_out02));
                    vst1_u16(output0 + 12, vcvt_bf16_f32(_out03));

                    output0 += outw * 4;
                }
            }
        }
    }
}
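// Annotation (inferred from the stride arithmetic above; not original ncnn
// text): within one channel of the transformed blob, the 64 (winograd63,
// 8x8) or 36 (winograd43, 6x6) frequency planes are laid out contiguously;
// each plane holds all tiles, and each tile holds 4 packed channel lanes.
// That is why the pointers are set up at "+ tiles * 4 * m" and advanced by
// "tiles * 32" / "tiles * 24" (8 resp. 6 planes at a time). A hypothetical
// flat index for one channel:
//
//     index(freq, tile, lane) = (freq * tiles + tile) * 4 + lane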
GB_unop__identity_uint32_fc32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_uint32_fc32
// op(A') function:  GB_unop_tran__identity_uint32_fc32

// C type:   uint32_t
// A type:   GxB_FC32_t
// cast:     uint32_t cij = GB_cast_to_uint32_t ((double) crealf (aij))
// unaryop:  cij = aij

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    uint32_t z = GB_cast_to_uint32_t ((double) crealf (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)                                       \
{                                                               \
    /* aij = Ax [pA] */                                         \
    GxB_FC32_t aij = Ax [pA] ;                                  \
    /* Cx [pC] = op (cast (aij)) */                             \
    uint32_t z = GB_cast_to_uint32_t ((double) crealf (aij)) ;  \
    Cx [pC] = z ;                                               \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__identity_uint32_fc32
(
    uint32_t *Cx,                   // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            uint32_t z = GB_cast_to_uint32_t ((double) crealf (aij)) ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            uint32_t z = GB_cast_to_uint32_t ((double) crealf (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__identity_uint32_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
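// Annotation (illustration only, not part of the generated GraphBLAS source):
// a plain-C sketch of what GB_unop_apply__identity_uint32_fc32 computes in the
// dense (non-bitmap) case: the real part of each single-precision complex
// entry, cast through double to uint32_t. GB_cast_to_uint32_t is assumed to
// behave like a checked double->uint32_t cast; the real helper also handles
// NaN and out-of-range values, which this sketch does not.
//
//     #include <stdint.h>
//     #include <complex.h>
//
//     static void apply_identity_uint32_fc32_ref
//     (
//         uint32_t *Cx,
//         const float complex *Ax,
//         int64_t anz
//     )
//     {
//         for (int64_t p = 0 ; p < anz ; p++)
//         {
//             Cx [p] = (uint32_t) (double) crealf (Ax [p]) ;
//         }
//     }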
matrix_arithmetic.h
/***************************************************************************
 *  include/stxxl/bits/containers/matrix_arithmetic.h
 *
 *  Part of the STXXL. See http://stxxl.org
 *
 *  Copyright (C) 2010-2011 Raoul Steffen <R-Steffen@gmx.de>
 *
 *  Distributed under the Boost Software License, Version 1.0.
 *  (See accompanying file LICENSE_1_0.txt or copy at
 *  http://www.boost.org/LICENSE_1_0.txt)
 **************************************************************************/

#ifndef STXXL_CONTAINERS_MATRIX_ARITHMETIC_HEADER
#define STXXL_CONTAINERS_MATRIX_ARITHMETIC_HEADER

#include <foxxll/mng/block_manager.hpp>
#include <stxxl/bits/containers/matrix_low_level.h>
#include <tlx/math/round_to_power_of_two.hpp>
#include <tlx/math/integer_log2.hpp>

#include <algorithm>

namespace stxxl {

#ifndef STXXL_MATRIX_MULTI_LEVEL_STRASSEN_WINOGRAD_MAX_NUM_LEVELS
#define STXXL_MATRIX_MULTI_LEVEL_STRASSEN_WINOGRAD_MAX_NUM_LEVELS 3
#endif
#ifndef STXXL_MATRIX_MULTI_LEVEL_STRASSEN_WINOGRAD_BASE_CASE
#define STXXL_MATRIX_MULTI_LEVEL_STRASSEN_WINOGRAD_BASE_CASE 2
#endif

template <typename ValueType>
class column_vector;

template <typename ValueType>
class row_vector;

template <typename ValueType, unsigned BlockSideLength>
class swappable_block_matrix;

//! \addtogroup matrix
//! \{

struct matrix_operation_statistic_dataset
{
    int64_t block_multiplication_calls,
        block_multiplications_saved_through_zero,
        block_addition_calls,
        block_additions_saved_through_zero;

    matrix_operation_statistic_dataset()
        : block_multiplication_calls(0),
          block_multiplications_saved_through_zero(0),
          block_addition_calls(0),
          block_additions_saved_through_zero(0)
    { }

    matrix_operation_statistic_dataset operator + (const matrix_operation_statistic_dataset& stat)
    {
        matrix_operation_statistic_dataset res(*this);
        res.block_multiplication_calls += stat.block_multiplication_calls;
        res.block_multiplications_saved_through_zero += stat.block_multiplications_saved_through_zero;
        res.block_addition_calls += stat.block_addition_calls;
        res.block_additions_saved_through_zero += stat.block_additions_saved_through_zero;
        return res;
    }

    matrix_operation_statistic_dataset operator - (const matrix_operation_statistic_dataset& stat)
    {
        matrix_operation_statistic_dataset res(*this);
        res.block_multiplication_calls -= stat.block_multiplication_calls;
        res.block_multiplications_saved_through_zero -= stat.block_multiplications_saved_through_zero;
        res.block_addition_calls -= stat.block_addition_calls;
        res.block_additions_saved_through_zero -= stat.block_additions_saved_through_zero;
        return res;
    }
};

struct matrix_operation_statistic
    : public foxxll::singleton<matrix_operation_statistic>,
      public matrix_operation_statistic_dataset
{ };

struct matrix_operation_statistic_data : public matrix_operation_statistic_dataset
{
    explicit matrix_operation_statistic_data(const matrix_operation_statistic& stat = *matrix_operation_statistic::get_instance())
        : matrix_operation_statistic_dataset(stat) { }

    explicit matrix_operation_statistic_data(const matrix_operation_statistic_dataset& stat)
        : matrix_operation_statistic_dataset(stat) { }

    matrix_operation_statistic_data& operator = (const matrix_operation_statistic& stat)
    {
        return *this = matrix_operation_statistic_data(stat);
    }

    void set()
    { operator = (*matrix_operation_statistic::get_instance()); }

    matrix_operation_statistic_data operator + (const matrix_operation_statistic_data& stat)
    { return matrix_operation_statistic_data(matrix_operation_statistic_dataset(*this) + matrix_operation_statistic_dataset(stat)); }

    matrix_operation_statistic_data operator - (const matrix_operation_statistic_data& stat)
    { return matrix_operation_statistic_data(matrix_operation_statistic_dataset(*this) - matrix_operation_statistic_dataset(stat)); }
};

std::ostream& operator << (std::ostream& o, const matrix_operation_statistic_data& statsd)
{
    o << "matrix operation statistics" << std::endl;
    o << "block multiplication calls                     : "
      << statsd.block_multiplication_calls << std::endl;
    o << "block multiplications saved through zero blocks: "
      << statsd.block_multiplications_saved_through_zero << std::endl;
    o << "block multiplications performed                : "
      << statsd.block_multiplication_calls - statsd.block_multiplications_saved_through_zero << std::endl;
    o << "block addition calls                           : "
      << statsd.block_addition_calls << std::endl;
    o << "block additions saved through zero blocks      : "
      << statsd.block_additions_saved_through_zero << std::endl;
    o << "block additions performed                      : "
      << statsd.block_addition_calls - statsd.block_additions_saved_through_zero << std::endl;
    return o;
}

//! \}

//! matrix low-level operations and tools
namespace matrix_local {

//! A static_quadtree holds 4^Level elements arranged in a quad tree.
//!
//! Static quad trees are useful for recursive algorithms with fixed depth
//! that partition the in- and output and perform pre- and postcalculations on the partitions.
//! The four children of one node are denoted as ul (up left), ur (up right), dl (down left), and dr (down right).
template <typename ValueType, unsigned Level>
struct static_quadtree
{
    using smaller_static_quadtree = static_quadtree<ValueType, Level - 1>;

    smaller_static_quadtree ul, ur, dl, dr;

    static_quadtree(smaller_static_quadtree ul, smaller_static_quadtree ur,
                    smaller_static_quadtree dl, smaller_static_quadtree dr)
        : ul(ul), ur(ur), dl(dl), dr(dr) { }

    static_quadtree() { }

    static_quadtree& operator &= (const static_quadtree& right)
    {
        ul &= right.ul, ur &= right.ur;
        dl &= right.dl, dr &= right.dr;
        return *this;
    }

    static_quadtree& operator += (const static_quadtree& right)
    {
        ul += right.ul, ur += right.ur;
        dl += right.dl, dr += right.dr;
        return *this;
    }

    static_quadtree& operator -= (const static_quadtree& right)
    {
        ul -= right.ul, ur -= right.ur;
        dl -= right.dl, dr -= right.dr;
        return *this;
    }

    static_quadtree operator & (const static_quadtree& right) const
    { return static_quadtree(ul & right.ul, ur & right.ur, dl & right.dl, dr & right.dr); }

    static_quadtree operator + (const static_quadtree& right) const
    { return static_quadtree(ul + right.ul, ur + right.ur, dl + right.dl, dr + right.dr); }

    static_quadtree operator - (const static_quadtree& right) const
    { return static_quadtree(ul - right.ul, ur - right.ur, dl - right.dl, dr - right.dr); }
};

template <typename ValueType>
struct static_quadtree<ValueType, 0>
{
    ValueType val;

    explicit static_quadtree(const ValueType& v)
        : val(v) { }

    static_quadtree() { }

    operator const ValueType& () const
    { return val; }

    operator ValueType& ()
    { return val; }

    static_quadtree& operator &= (const static_quadtree& right)
    { val &= right.val; return *this; }

    static_quadtree& operator += (const static_quadtree& right)
    { val += right.val; return *this; }

    static_quadtree& operator -= (const static_quadtree& right)
    { val -= right.val; return *this; }

    static_quadtree operator ! () const
    { return static_quadtree(!val); }

    static_quadtree operator & (const static_quadtree& right) const
    { return static_quadtree(val & right.val); }

    static_quadtree operator + (const static_quadtree& right) const
    { return static_quadtree(val + right.val); }

    static_quadtree operator - (const static_quadtree& right) const
    { return static_quadtree(val - right.val); }
};
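// Usage sketch (annotation, not STXXL code): a static_quadtree<int, 1> bundles
// four Level-0 leaves, and the element-wise operators recurse into ul/ur/dl/dr:
//
//     static_quadtree<int, 0> a(1), b(2);
//     static_quadtree<int, 1> q(a, b, a, b);
//     static_quadtree<int, 1> r = q + q;   // r.ul.val == 2, r.ur.val == 4
//
// The <bool, Level> instantiation is used below as a per-quadrant zero-block
// flag (see the zbt typedefs).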
template <typename ValueType, unsigned BlockSideLength, unsigned Level, bool AExists, bool BExists>
struct feedable_strassen_winograd
{
    typedef static_quadtree<bool, Level> zbt; // true <=> is a zero-block
    using vt = static_quadtree<ValueType, Level>;

    using smaller_feedable_strassen_winograd_ab = feedable_strassen_winograd<ValueType, BlockSideLength, Level - 1, AExists, BExists>;
    using smaller_feedable_strassen_winograd_a = feedable_strassen_winograd<ValueType, BlockSideLength, Level - 1, AExists, false>;
    using smaller_feedable_strassen_winograd_b = feedable_strassen_winograd<ValueType, BlockSideLength, Level - 1, false, BExists>;
    using smaller_feedable_strassen_winograd_n = feedable_strassen_winograd<ValueType, BlockSideLength, Level - 1, false, false>;

    using swappable_block_matrix_type = swappable_block_matrix<ValueType, BlockSideLength>;
    using block_scheduler_type = typename swappable_block_matrix_type::block_scheduler_type;
    using internal_block_type = typename block_scheduler_type::internal_block_type;
    using size_type = typename swappable_block_matrix_type::size_type;

    const size_type n, m, l;
    smaller_feedable_strassen_winograd_ab p1, p2;
    smaller_feedable_strassen_winograd_n p3, p4, p5;
    smaller_feedable_strassen_winograd_b p6;
    smaller_feedable_strassen_winograd_a p7;

    feedable_strassen_winograd(
        const swappable_block_matrix_type& existing_a, const size_type a_from_row, const size_type a_from_col,
        block_scheduler_type& bs_c, const size_type n, const size_type m, const size_type l,
        const swappable_block_matrix_type& existing_b, const size_type b_from_row, const size_type b_from_col)
        : n(n), m(m), l(l),
          p1(existing_a, a_from_row, a_from_col,       bs_c, n/2, m/2, l/2, existing_b, b_from_row,       b_from_col),
          p2(existing_a, a_from_row, a_from_col + l/2, bs_c, n/2, m/2, l/2, existing_b, b_from_row + l/2, b_from_col),
          p3(                                          bs_c, n/2, m/2, l/2), // NOLINT
          p4(                                          bs_c, n/2, m/2, l/2), // NOLINT
          p5(                                          bs_c, n/2, m/2, l/2), // NOLINT
          p6(                                          bs_c, n/2, m/2, l/2, existing_b, b_from_row + l/2, b_from_col + m/2), // NOLINT
          p7(existing_a, a_from_row + n/2, a_from_col + l/2, bs_c, n/2, m/2, l/2)
    {}

    feedable_strassen_winograd(
        const swappable_block_matrix_type& existing_a, const size_type a_from_row, const size_type a_from_col,
        block_scheduler_type& bs_c, const size_type n, const size_type m, const size_type l)
        : n(n), m(m), l(l),
          p1(existing_a, a_from_row, a_from_col,       bs_c, n/2, m/2, l/2),
          p2(existing_a, a_from_row, a_from_col + l/2, bs_c, n/2, m/2, l/2),
          p3(                                          bs_c, n/2, m/2, l/2), // NOLINT
          p4(                                          bs_c, n/2, m/2, l/2), // NOLINT
          p5(                                          bs_c, n/2, m/2, l/2), // NOLINT
          p6(                                          bs_c, n/2, m/2, l/2), // NOLINT
          p7(existing_a, a_from_row + n/2, a_from_col + l/2, bs_c, n/2, m/2, l/2)
    {}

    feedable_strassen_winograd(
        block_scheduler_type& bs_c, const size_type n, const size_type m, const size_type l,
        const swappable_block_matrix_type& existing_b, const size_type b_from_row, const size_type b_from_col)
        : n(n), m(m), l(l),
          p1(bs_c, n/2, m/2, l/2, existing_b, b_from_row,       b_from_col),
          p2(bs_c, n/2, m/2, l/2, existing_b, b_from_row + l/2, b_from_col),
          p3(bs_c, n/2, m/2, l/2),
          p4(bs_c, n/2, m/2, l/2),
          p5(bs_c, n/2, m/2, l/2),
          p6(bs_c, n/2, m/2, l/2, existing_b, b_from_row + l/2, b_from_col + m/2),
          p7(bs_c, n/2, m/2, l/2)
    {}

    feedable_strassen_winograd(
        block_scheduler_type& bs_c, const size_type n, const size_type m, const size_type l)
        : n(n), m(m), l(l),
          p1(bs_c, n / 2, m / 2, l / 2),
          p2(bs_c, n / 2, m / 2, l / 2),
          p3(bs_c, n / 2, m / 2, l / 2),
          p4(bs_c, n / 2, m / 2, l / 2),
          p5(bs_c, n / 2, m / 2, l / 2),
          p6(bs_c, n / 2, m / 2, l / 2),
          p7(bs_c, n / 2, m / 2, l / 2)
    { }

    void begin_feeding_a_block(const size_type& block_row, const size_type& block_col, const zbt zb)
    {
        typename zbt::smaller_static_quadtree
            s1 = zb.dl & zb.dr,
            s2 = s1 & zb.ul,
            s3 = zb.ul & zb.dl,
            s4 = zb.ur & s2;
        p1.begin_feeding_a_block(block_row, block_col, zb.ul);
        p2.begin_feeding_a_block(block_row, block_col, zb.ur);
        p3.begin_feeding_a_block(block_row, block_col, s1);
        p4.begin_feeding_a_block(block_row, block_col, s2);
        p5.begin_feeding_a_block(block_row, block_col, s3);
        p6.begin_feeding_a_block(block_row, block_col, s4);
        p7.begin_feeding_a_block(block_row, block_col, zb.dr);
    }

    void feed_a_element(const size_t element_num, const vt v)
    {
        typename vt::smaller_static_quadtree
            s1 = v.dl + v.dr,
            s2 = s1 - v.ul,
            s3 = v.ul - v.dl,
            s4 = v.ur - s2;
        p1.feed_a_element(element_num, v.ul);
        p2.feed_a_element(element_num, v.ur);
        p3.feed_a_element(element_num, s1);
        p4.feed_a_element(element_num, s2);
        p5.feed_a_element(element_num, s3);
        p6.feed_a_element(element_num, s4);
        p7.feed_a_element(element_num, v.dr);
    }

    void end_feeding_a_block(const size_type& block_row, const size_type& block_col, const zbt zb)
    {
        typename zbt::smaller_static_quadtree
            s1 = zb.dl & zb.dr,
            s2 = s1 & zb.ul,
            s3 = zb.ul & zb.dl,
            s4 = zb.ur & s2;
        p1.end_feeding_a_block(block_row, block_col, zb.ul);
        p2.end_feeding_a_block(block_row, block_col, zb.ur);
        p3.end_feeding_a_block(block_row, block_col, s1);
        p4.end_feeding_a_block(block_row, block_col, s2);
        p5.end_feeding_a_block(block_row, block_col, s3);
        p6.end_feeding_a_block(block_row, block_col, s4);
        p7.end_feeding_a_block(block_row, block_col, zb.dr);
    }

    void begin_feeding_b_block(const size_type& block_row, const size_type& block_col, const zbt zb)
    {
        typename zbt::smaller_static_quadtree
            t1 = zb.ur & zb.ul,
            t2 = zb.dr & t1,
            t3 = zb.dr & zb.ur,
            t4 = zb.dl & t2;
        p1.begin_feeding_b_block(block_row, block_col, zb.ul);
        p2.begin_feeding_b_block(block_row, block_col, zb.dl);
        p3.begin_feeding_b_block(block_row, block_col, t1);
        p4.begin_feeding_b_block(block_row, block_col, t2);
        p5.begin_feeding_b_block(block_row, block_col, t3);
        p6.begin_feeding_b_block(block_row, block_col, zb.dr);
        p7.begin_feeding_b_block(block_row, block_col, t4);
    }

    void feed_b_element(const size_t element_num, const vt v)
    {
        typename vt::smaller_static_quadtree
            t1 = v.ur - v.ul,
            t2 = v.dr - t1,
            t3 = v.dr - v.ur,
            t4 = v.dl - t2;
        p1.feed_b_element(element_num, v.ul);
        p2.feed_b_element(element_num, v.dl);
        p3.feed_b_element(element_num, t1);
        p4.feed_b_element(element_num, t2);
        p5.feed_b_element(element_num, t3);
        p6.feed_b_element(element_num, v.dr);
        p7.feed_b_element(element_num, t4);
    }

    void end_feeding_b_block(const size_type& block_row, const size_type& block_col, const zbt zb)
    {
        typename zbt::smaller_static_quadtree
            t1 = zb.ur & zb.ul,
            t2 = zb.dr & t1,
            t3 = zb.dr & zb.ur,
            t4 = zb.dl & t2;
        p1.end_feeding_b_block(block_row, block_col, zb.ul);
        p2.end_feeding_b_block(block_row, block_col, zb.dl);
        p3.end_feeding_b_block(block_row, block_col, t1);
        p4.end_feeding_b_block(block_row, block_col, t2);
        p5.end_feeding_b_block(block_row, block_col, t3);
        p6.end_feeding_b_block(block_row, block_col, zb.dr);
        p7.end_feeding_b_block(block_row, block_col, t4);
    }

    void multiply()
    {
        p1.multiply();
        p2.multiply();
        p3.multiply();
        p4.multiply();
        p5.multiply();
        p6.multiply();
        p7.multiply();
    }

    zbt begin_reading_block(const size_type& block_row, const size_type& block_col)
    {
        zbt r;
        r.ur = r.ul = p1.begin_reading_block(block_row, block_col);
        r.ul &= p2.begin_reading_block(block_row, block_col);
        r.ur &= p4.begin_reading_block(block_row, block_col);
        r.dr = r.dl = p5.begin_reading_block(block_row, block_col);
        r.dl &= r.ur;
        r.dl &= p7.begin_reading_block(block_row, block_col);
        r.ur &= p3.begin_reading_block(block_row, block_col);
        r.dr &= r.ur;
        r.ur &= p6.begin_reading_block(block_row, block_col);
        return r;
    }

    vt read_element(const size_t element_num)
    {
        vt r;
        r.ur = r.ul = p1.read_element(element_num);
        r.ul += p2.read_element(element_num);
        r.ur += p4.read_element(element_num);
        r.dr = r.dl = p5.read_element(element_num);
        r.dl += r.ur;
        r.dl += p7.read_element(element_num);
        r.ur += p3.read_element(element_num);
        r.dr += r.ur;
        r.ur += p6.read_element(element_num);
        return r;
    }

    zbt end_reading_block(const size_type& block_row, const size_type& block_col)
    {
        zbt r;
        r.ur = r.ul = p1.end_reading_block(block_row, block_col);
        r.ul &= p2.end_reading_block(block_row, block_col);
        r.ur &= p4.end_reading_block(block_row, block_col);
        r.dr = r.dl = p5.end_reading_block(block_row, block_col);
        r.dl &= r.ur;
        r.dl &= p7.end_reading_block(block_row, block_col);
        r.ur &= p3.end_reading_block(block_row, block_col);
        r.dr &= r.ur;
        r.ur &= p6.end_reading_block(block_row, block_col);
        return r;
    }
};
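// Reading guide (annotation, not STXXL code): the seven children realize the
// Strassen-Winograd scheme. With a = [a11 a12; a21 a22] and b = [b11 b12; b21 b22],
// the feeders above compute
//     s1 = a21 + a22, s2 = s1 - a11, s3 = a11 - a21, s4 = a12 - s2
//     t1 = b12 - b11, t2 = b22 - t1, t3 = b22 - b12, t4 = b21 - t2
// so that p1 = a11*b11, p2 = a12*b21, p3 = s1*t1, p4 = s2*t2, p5 = s3*t3,
// p6 = s4*b22, p7 = a22*t4 -- seven block products instead of eight. The
// accumulation chain in read_element / *_reading_block reassembles the four
// result quadrants from these products; the zbt variants propagate
// zero-block flags through the same dataflow.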
template <typename ValueType, unsigned BlockSideLength, bool AExists, bool BExists>
struct feedable_strassen_winograd<ValueType, BlockSideLength, 0, AExists, BExists>
{
    typedef static_quadtree<bool, 0> zbt; // true <=> is a zero-block
    using vt = static_quadtree<ValueType, 0>;

    using swappable_block_matrix_type = swappable_block_matrix<ValueType, BlockSideLength>;
    using block_scheduler_type = typename swappable_block_matrix_type::block_scheduler_type;
    using internal_block_type = typename block_scheduler_type::internal_block_type;
    using size_type = typename swappable_block_matrix_type::size_type;

    swappable_block_matrix_type a, b, c;
    const size_type n, m, l;
    internal_block_type* iblock;

    feedable_strassen_winograd(
        const swappable_block_matrix_type& existing_a, const size_type a_from_row, const size_type a_from_col,
        block_scheduler_type& bs_c, const size_type n, const size_type m, const size_type l,
        const swappable_block_matrix_type& existing_b, const size_type b_from_row, const size_type b_from_col)
        : a(existing_a, n, l, a_from_row, a_from_col),
          b(existing_b, n, l, b_from_row, b_from_col),
          c(bs_c, n, m),
          n(n), m(m), l(l),
          iblock(0)
    { }

    feedable_strassen_winograd(
        const swappable_block_matrix_type& existing_a, const size_type a_from_row, const size_type a_from_col,
        block_scheduler_type& bs_c, const size_type n, const size_type m, const size_type l)
        : a(existing_a, n, l, a_from_row, a_from_col),
          b(bs_c, n, l),
          c(bs_c, n, m),
          n(n), m(m), l(l),
          iblock(0)
    { }

    feedable_strassen_winograd(
        block_scheduler_type& bs_c, const size_type n, const size_type m, const size_type l,
        const swappable_block_matrix_type& existing_b, const size_type b_from_row, const size_type b_from_col)
        : a(bs_c, n, l),
          b(existing_b, n, l, b_from_row, b_from_col),
          c(bs_c, n, m),
          n(n), m(m), l(l),
          iblock(0)
    { }

    feedable_strassen_winograd(
        block_scheduler_type& bs_c, const size_type n, const size_type m, const size_type l)
        : a(bs_c, n, l),
          b(bs_c, n, l),
          c(bs_c, n, m),
          n(n), m(m), l(l),
          iblock(0)
    { }

    void begin_feeding_a_block(const size_type& block_row, const size_type& block_col, const zbt)
    {
        if (! AExists)
            iblock = &a.bs.acquire(a(block_row, block_col), true);
    }

    void feed_a_element(const size_t element_num, const vt v)
    {
        if (! AExists)
            (*iblock)[element_num] = v;
    }

    void end_feeding_a_block(const size_type& block_row, const size_type& block_col, const zbt zb)
    {
        if (! AExists)
        {
            a.bs.release(a(block_row, block_col), ! zb);
            iblock = 0;
        }
    }

    void begin_feeding_b_block(const size_type& block_row, const size_type& block_col, const zbt)
    {
        if (! BExists)
            iblock = &b.bs.acquire(b(block_row, block_col), true);
    }

    void feed_b_element(const size_t element_num, const vt v)
    {
        if (! BExists)
            (*iblock)[element_num] = v;
    }

    void end_feeding_b_block(const size_type& block_row, const size_type& block_col, const zbt zb)
    {
        if (! BExists)
        {
            b.bs.release(b(block_row, block_col), ! zb);
            iblock = 0;
        }
    }

    void multiply()
    {
        matrix_operations<ValueType, BlockSideLength>::choose_level_for_feedable_sw(a, b, c);
    }

    zbt begin_reading_block(const size_type& block_row, const size_type& block_col)
    {
        zbt zb = zbt(! c.bs.is_initialized(c(block_row, block_col)));
        iblock = &c.bs.acquire(c(block_row, block_col));
        return zb;
    }

    vt read_element(const size_t element_num)
    { return vt((*iblock)[element_num]); }

    zbt end_reading_block(const size_type& block_row, const size_type& block_col)
    {
        c.bs.release(c(block_row, block_col), false);
        iblock = 0;
        return zbt(! c.bs.is_initialized(c(block_row, block_col)));
    }
};

template <typename ValueType, unsigned BlockSideLength, unsigned Level>
struct matrix_to_quadtree
{
    typedef static_quadtree<bool, Level> zbt; // true <=> is a zero-block
    using vt = static_quadtree<ValueType, Level>;

    using smaller_matrix_to_quadtree = matrix_to_quadtree<ValueType, BlockSideLength, Level - 1>;

    using swappable_block_matrix_type = swappable_block_matrix<ValueType, BlockSideLength>;
    using block_scheduler_type = typename swappable_block_matrix_type::block_scheduler_type;
    using internal_block_type = typename block_scheduler_type::internal_block_type;
    using size_type = typename swappable_block_matrix_type::size_type;

    smaller_matrix_to_quadtree ul, ur, dl, dr;

    explicit matrix_to_quadtree(const swappable_block_matrix_type& matrix)
        : ul(matrix, matrix.get_height()/2, matrix.get_width()/2, 0, 0),
          ur(matrix, matrix.get_height()/2, matrix.get_width()/2, 0, matrix.get_width()/2),
          dl(matrix, matrix.get_height()/2, matrix.get_width()/2, matrix.get_height()/2, 0),
          dr(matrix, matrix.get_height()/2, matrix.get_width()/2, matrix.get_height()/2, matrix.get_width()/2)
    { assert(! (matrix.get_height() % 2 | matrix.get_width() % 2)); }

    matrix_to_quadtree(const swappable_block_matrix_type& matrix,
                       const size_type height, const size_type width, const size_type from_row, const size_type from_col)
        : ul(matrix, height/2, width/2, from_row,            from_col),
          ur(matrix, height/2, width/2, from_row,            from_col + width/2),
          dl(matrix, height/2, width/2, from_row + height/2, from_col),
          dr(matrix, height/2, width/2, from_row + height/2, from_col + width/2)
    { assert(! (height % 2 | width % 2)); }

    void begin_feeding_block(const size_type& block_row, const size_type& block_col, const zbt zb)
    {
        ul.begin_feeding_block(block_row, block_col, zb.ul);
        ur.begin_feeding_block(block_row, block_col, zb.ur);
        dl.begin_feeding_block(block_row, block_col, zb.dl);
        dr.begin_feeding_block(block_row, block_col, zb.dr);
    }

    void feed_element(const size_t element_num, const vt v)
    {
        ul.feed_element(element_num, v.ul);
        ur.feed_element(element_num, v.ur);
        dl.feed_element(element_num, v.dl);
        dr.feed_element(element_num, v.dr);
    }

    void feed_and_add_element(const size_t element_num, const vt v)
    {
        ul.feed_and_add_element(element_num, v.ul);
        ur.feed_and_add_element(element_num, v.ur);
        dl.feed_and_add_element(element_num, v.dl);
        dr.feed_and_add_element(element_num, v.dr);
    }

    void end_feeding_block(const size_type& block_row, const size_type& block_col, const zbt zb)
    {
        ul.end_feeding_block(block_row, block_col, zb.ul);
        ur.end_feeding_block(block_row, block_col, zb.ur);
        dl.end_feeding_block(block_row, block_col, zb.dl);
        dr.end_feeding_block(block_row, block_col, zb.dr);
    }

    zbt begin_reading_block(const size_type& block_row, const size_type& block_col)
    {
        zbt zb;
        zb.ul = ul.begin_reading_block(block_row, block_col);
        zb.ur = ur.begin_reading_block(block_row, block_col);
        zb.dl = dl.begin_reading_block(block_row, block_col);
        zb.dr = dr.begin_reading_block(block_row, block_col);
        return zb;
    }

    vt read_element(const size_t element_num)
    {
        vt v;
        v.ul = ul.read_element(element_num);
        v.ur = ur.read_element(element_num);
        v.dl = dl.read_element(element_num);
        v.dr = dr.read_element(element_num);
        return v;
    }

    zbt end_reading_block(const size_type& block_row, const size_type& block_col)
    {
        zbt zb;
        zb.ul = ul.end_reading_block(block_row, block_col);
        zb.ur = ur.end_reading_block(block_row, block_col);
        zb.dl = dl.end_reading_block(block_row, block_col);
        zb.dr = dr.end_reading_block(block_row, block_col);
        return zb;
    }

    const size_type & get_height_in_blocks()
    { return ul.get_height_in_blocks(); }

    const size_type & get_width_in_blocks()
    { return ul.get_width_in_blocks(); }
};

template <typename ValueType, unsigned BlockSideLength>
struct matrix_to_quadtree<ValueType, BlockSideLength, 0>
{
    typedef static_quadtree<bool, 0> zbt; // true <=> is a zero-block
    using vt = static_quadtree<ValueType, 0>;

    using swappable_block_matrix_type = swappable_block_matrix<ValueType, BlockSideLength>;
    using block_scheduler_type = typename swappable_block_matrix_type::block_scheduler_type;
    using internal_block_type = typename block_scheduler_type::internal_block_type;
    using size_type = typename swappable_block_matrix_type::size_type;

    swappable_block_matrix_type m;
    internal_block_type* iblock;

    explicit matrix_to_quadtree(const swappable_block_matrix_type& matrix)
        : m(matrix, matrix.get_height(), matrix.get_width(), 0, 0),
          iblock(0)
    { }

    matrix_to_quadtree(const swappable_block_matrix_type& matrix,
                       const size_type height, const size_type width, const size_type from_row, const size_type from_col)
        : m(matrix, height, width, from_row, from_col),
          iblock(0)
    { }

    void begin_feeding_block(const size_type& block_row, const size_type& block_col, const zbt)
    {
        iblock = &m.bs.acquire(m(block_row, block_col));
    }

    void feed_element(const size_t element_num, const vt v)
    {
        (*iblock)[element_num] = v;
    }

    void feed_and_add_element(const size_t element_num, const vt v)
    {
        (*iblock)[element_num] += v;
    }

    void end_feeding_block(const size_type& block_row, const size_type& block_col, const zbt zb)
    {
        m.bs.release(m(block_row, block_col), ! zb);
        iblock = 0;
    }

    zbt begin_reading_block(const size_type& block_row, const size_type& block_col)
    {
        zbt zb = zbt(! m.bs.is_initialized(m(block_row, block_col)));
        iblock = &m.bs.acquire(m(block_row, block_col));
        return zb;
    }

    vt read_element(const size_t element_num)
    { return vt((*iblock)[element_num]); }

    zbt end_reading_block(const size_type& block_row, const size_type& block_col)
    {
        m.bs.release(m(block_row, block_col), false);
        iblock = 0;
        return zbt(! m.bs.is_initialized(m(block_row, block_col)));
    }

    const size_type & get_height_in_blocks()
    { return m.get_height(); }

    const size_type & get_width_in_blocks()
    { return m.get_width(); }
};
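// Annotation (not STXXL code): matrix_to_quadtree presents a
// swappable_block_matrix as 4^Level interleaved quadrant views, so one
// begin/feed/end cycle touches the same block coordinate in every quadrant at
// once. The Level-0 specialization is the leaf that actually acquires and
// releases blocks from the block scheduler; uninitialized blocks double as
// zero blocks, which is what the zbt return values report.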
block_scheduler_type& bs_c, const size_type n, const size_type m, const size_type l, const swappable_block_matrix_type& existing_b, const size_type b_from_row, const size_type b_from_col) : n(n), m(m), l(l), p1(bs_c, n/2, m/2, l/2, existing_b, b_from_row, b_from_col), p2(bs_c, n/2, m/2, l/2, existing_b, b_from_row + l/2, b_from_col), p3(bs_c, n/2, m/2, l/2), p4(bs_c, n/2, m/2, l/2), p5(bs_c, n/2, m/2, l/2), p6(bs_c, n/2, m/2, l/2, existing_b, b_from_row + l/2, b_from_col + m/2), p7(bs_c, n/2, m/2, l/2) {} inline feedable_strassen_winograd_block_grained( block_scheduler_type& bs_c, const size_type n, const size_type m, const size_type l) : n(n), m(m), l(l), p1(bs_c, n / 2, m / 2, l / 2), p2(bs_c, n / 2, m / 2, l / 2), p3(bs_c, n / 2, m / 2, l / 2), p4(bs_c, n / 2, m / 2, l / 2), p5(bs_c, n / 2, m / 2, l / 2), p6(bs_c, n / 2, m / 2, l / 2), p7(bs_c, n / 2, m / 2, l / 2) { } inline void feed_a(const size_type& row, const size_type& col, const swappable_block_matrix_type& bl) { // partition bl typename Ops::swappable_block_matrix_quarterer qbl(bl); // preadditions swappable_block_matrix_type s1(bl.bs, qbl.ul.get_height(), qbl.ul.get_width(), qbl.ul.is_transposed()), s2(bl.bs, qbl.ul.get_height(), qbl.ul.get_width(), qbl.ul.is_transposed()), s3(bl.bs, qbl.ul.get_height(), qbl.ul.get_width(), qbl.ul.is_transposed()), s4(bl.bs, qbl.ul.get_height(), qbl.ul.get_width(), qbl.ul.is_transposed()); Ops::strassen_winograd_preaddition_a(qbl.ul, qbl.ur, qbl.dl, qbl.dr, s1, s2, s3, s4); // feed recursive p1.feed_a(row, col, qbl.ul); p2.feed_a(row, col, qbl.ur); p3.feed_a(row, col, s1); p4.feed_a(row, col, s2); p5.feed_a(row, col, s3); p6.feed_a(row, col, s4); p7.feed_a(row, col, qbl.dr); } inline void feed_b(const size_type& row, const size_type& col, const swappable_block_matrix_type& bl) { // partition bl typename Ops::swappable_block_matrix_quarterer qbl(bl); // preadditions swappable_block_matrix_type t1(bl.bs, qbl.ul.get_height(), qbl.ul.get_width(), qbl.ul.is_transposed()), t2(bl.bs, qbl.ul.get_height(), qbl.ul.get_width(), qbl.ul.is_transposed()), t3(bl.bs, qbl.ul.get_height(), qbl.ul.get_width(), qbl.ul.is_transposed()), t4(bl.bs, qbl.ul.get_height(), qbl.ul.get_width(), qbl.ul.is_transposed()); Ops::strassen_winograd_preaddition_b(qbl.ul, qbl.ur, qbl.dl, qbl.dr, t1, t2, t3, t4); // feed recursive p1.feed_b(row, col, qbl.ul); p2.feed_b(row, col, qbl.dl); p3.feed_b(row, col, t1); p4.feed_b(row, col, t2); p5.feed_b(row, col, t3); p6.feed_b(row, col, qbl.dr); p7.feed_b(row, col, t4); } inline void multiply() { p1.multiply(); p2.multiply(); p3.multiply(); p4.multiply(); p5.multiply(); p6.multiply(); p7.multiply(); } inline void read_and_add(const size_type& row, const size_type& col, const swappable_block_matrix_type& bl) { // partition bl typename Ops::swappable_block_matrix_quarterer qbl(bl); // postadditions swappable_block_matrix_type px(bl.bs, qbl.ul.get_height(), qbl.ul.get_width(), qbl.ul.is_transposed()); p2.read_and_add(row, col, qbl.ul); p1.read_and_add(row, col, px); Ops::element_op(qbl.ul, px, typename Ops::addition()); p4.read_and_add(row, col, px); Ops::element_op(qbl.ur, px, typename Ops::addition()); p5.read_and_add(row, col, px); Ops::element_op_twice_nontransposed(qbl.dl, qbl.dr, px, typename Ops::addition()); px.set_zero(); p7.read_and_add(row, col, qbl.dl); p3.read_and_add(row, col, px); Ops::element_op_twice_nontransposed(qbl.dr, qbl.ur, px, typename Ops::addition()); p6.read_and_add(row, col, qbl.ur); } inline static size_t get_num_temp_grains() { return 
smaller_feedable_strassen_winograd_ab::get_num_temp_grains() + (4 ^ Level) * 2; } }; template <typename ValueType, unsigned BlockSideLength, bool AExists, bool BExists> struct feedable_strassen_winograd_block_grained<ValueType, BlockSideLength, 0, AExists, BExists> { using swappable_block_matrix_type = swappable_block_matrix<ValueType, BlockSideLength>; using block_scheduler_type = typename swappable_block_matrix_type::block_scheduler_type; using swappable_block_identifier_type = typename swappable_block_matrix_type::swappable_block_identifier_type; using size_type = typename swappable_block_matrix_type::size_type; using Ops = matrix_operations<ValueType, BlockSideLength>; using bt = static_quadtree<swappable_block_identifier_type, 0>; swappable_block_matrix_type a, b, c; inline feedable_strassen_winograd_block_grained( const swappable_block_matrix_type& existing_a, const size_type a_from_row, const size_type a_from_col, block_scheduler_type& bs_c, const size_type n, const size_type m, const size_type l, const swappable_block_matrix_type& existing_b, const size_type b_from_row, const size_type b_from_col) : a(existing_a, n, l, a_from_row, a_from_col), b(existing_b, n, l, b_from_row, b_from_col), c(bs_c, n, m) { } inline feedable_strassen_winograd_block_grained( const swappable_block_matrix_type& existing_a, const size_type a_from_row, const size_type a_from_col, block_scheduler_type& bs_c, const size_type n, const size_type m, const size_type l) : a(existing_a, n, l, a_from_row, a_from_col), b(bs_c, n, l), c(bs_c, n, m) { } inline feedable_strassen_winograd_block_grained( block_scheduler_type& bs_c, const size_type n, const size_type m, const size_type l, const swappable_block_matrix_type& existing_b, const size_type b_from_row, const size_type b_from_col) : a(bs_c, n, l), b(existing_b, n, l, b_from_row, b_from_col), c(bs_c, n, m) { } inline feedable_strassen_winograd_block_grained( block_scheduler_type& bs_c, const size_type n, const size_type m, const size_type l) : a(bs_c, n, l), b(bs_c, n, l), c(bs_c, n, m) { } inline void feed_a(const size_type& row, const size_type& col, const swappable_block_matrix_type& bl) { if (! AExists) { // copy bl to a from (row, col) (assuming a from (row, col) == 0) swappable_block_matrix_type at(a, bl.get_height(), bl.get_width(), row, col); Ops::element_op(at, bl, typename Ops::addition()); } } inline void feed_b(const size_type& row, const size_type& col, const swappable_block_matrix_type& bl) { if (! BExists) { // copy bl(0,0) to b(row, col) (assuming b from (row, col) == 0) swappable_block_matrix_type bt(b, bl.get_height(), bl.get_width(), row, col); Ops::element_op(bt, bl, typename Ops::addition()); } } inline void multiply() { matrix_operations<ValueType, BlockSideLength>:: multi_level_strassen_winograd_multiply_and_add_block_grained(a, b, c); if (! AExists) a.set_zero(); if (! 
BExists) b.set_zero(); } inline void read_and_add(const size_type& row, const size_type& col, swappable_block_matrix_type& bl) { // add c from (row, col) to bl swappable_block_matrix_type ct(c, bl.get_height(), bl.get_width(), row, col); Ops::element_op(bl, ct, typename Ops::addition()); ct.set_zero(); } inline static size_t get_num_temp_grains() { return 0; } }; template <typename ValueType, unsigned BlockSideLength, unsigned Level, unsigned Granularity> struct matrix_to_quadtree_block_grained { using swappable_block_matrix_type = swappable_block_matrix<ValueType, BlockSideLength>; using size_type = typename swappable_block_matrix_type::size_type; using smaller_matrix_to_quadtree_block_grained = matrix_to_quadtree_block_grained<ValueType, BlockSideLength, Level - 1, Granularity>; smaller_matrix_to_quadtree_block_grained ul, ur, dl, dr; explicit matrix_to_quadtree_block_grained(const swappable_block_matrix_type & matrix) : ul(matrix, matrix.get_height()/2, matrix.get_width()/2, 0, 0), ur(matrix, matrix.get_height()/2, matrix.get_width()/2, 0, matrix.get_width()/2), dl(matrix, matrix.get_height()/2, matrix.get_width()/2, matrix.get_height()/2, 0), dr(matrix, matrix.get_height()/2, matrix.get_width()/2, matrix.get_height()/2, matrix.get_width()/2) { assert(! (matrix.get_height() % 2 | matrix.get_width() % 2)); } inline matrix_to_quadtree_block_grained(const swappable_block_matrix_type & matrix, const size_type height, const size_type width, const size_type from_row, const size_type from_col) : ul(matrix, height/2, width/2, from_row, from_col), ur(matrix, height/2, width/2, from_row, from_col + width/2), dl(matrix, height/2, width/2, from_row + height/2, from_col), dr(matrix, height/2, width/2, from_row + height/2, from_col + width/2) { assert(! (height % 2 | width % 2)); } inline swappable_block_matrix_type operator () (const size_type& row, const size_type& col) { return swappable_block_matrix_type(ul(row, col), ur(row, col), dl(row, col), dr(row, col)); } inline const size_type get_height() { return ul.get_height(); } inline const size_type get_width() { return ul.get_width(); } }; template <typename ValueType, unsigned BlockSideLength, unsigned Granularity> struct matrix_to_quadtree_block_grained<ValueType, BlockSideLength, 0, Granularity> { using swappable_block_matrix_type = swappable_block_matrix<ValueType, BlockSideLength>; using size_type = typename swappable_block_matrix_type::size_type; swappable_block_matrix_type m; explicit matrix_to_quadtree_block_grained(const swappable_block_matrix_type& matrix) : m(matrix, matrix.get_height(), matrix.get_width(), 0, 0) { assert(! (matrix.get_height() % Granularity | matrix.get_width() % Granularity)); } inline matrix_to_quadtree_block_grained(const swappable_block_matrix_type& matrix, const size_type height, const size_type width, const size_type from_row, const size_type from_col) : m(matrix, height, width, from_row, from_col) { assert(! (matrix.get_height() % Granularity | matrix.get_width() % Granularity)); } inline swappable_block_matrix_type operator () (const size_type& row, const size_type& col) { return swappable_block_matrix_type(m, Granularity, Granularity, row * Granularity, col * Granularity); } inline const size_type get_height() { return m.get_height() / Granularity; } inline const size_type get_width() { return m.get_width() / Granularity; } }; template <typename ValueType, unsigned BlockSideLength> struct matrix_operations { // tuning-parameter: Only matrices larger than this (in blocks) are processed by Strassen-Winograd. 
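// Annotation (not STXXL code): Granularity is measured in blocks. The Level-0
// view hands out Granularity x Granularity sub-matrices of blocks per
// (row, col) index instead of single blocks, which is what makes this variant
// "block grained": the Strassen-Winograd recursion above stops early and the
// leaves multiply whole sub-matrices via matrix_operations.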
template <typename ValueType, unsigned BlockSideLength>
struct matrix_operations
{
    // tuning-parameter: Only matrices larger than this (in blocks) are processed by Strassen-Winograd.
    // you have to adapt choose_level_for_feedable_sw, too
    static const unsigned strassen_winograd_base_case_size;

    using swappable_block_matrix_type = swappable_block_matrix<ValueType, BlockSideLength>;
    using block_scheduler_type = typename swappable_block_matrix_type::block_scheduler_type;
    using swappable_block_identifier_type = typename swappable_block_matrix_type::swappable_block_identifier_type;
    using internal_block_type = typename block_scheduler_type::internal_block_type;
    using size_type = typename swappable_block_matrix_type::size_type;
    using column_vector_type = column_vector<ValueType>;
    using row_vector_type = row_vector<ValueType>;
    using vector_size_type = typename column_vector_type::size_type;

    // +-+-+-+ addition +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

    struct addition
    {
        /* op(c,a,b) means c = a <op> b  e.g. assign sum
         * op(c,a) means c <op>= a       e.g. add up
         * op(a) means <op>a             e.g. sign
         *
         * it should hold:
         * op(c,0,0) equivalent c = 0
         * op(c=0,a) equivalent c = op(a)
         * op(c,0) equivalent {}
         */

        inline ValueType& operator () (ValueType& c, const ValueType& a, const ValueType& b)
        { return c = a + b; }

        inline ValueType& operator () (ValueType& c, const ValueType& a)
        { return c += a; }

        inline ValueType operator () (const ValueType& a)
        { return +a; }
    };

    struct subtraction
    {
        inline ValueType& operator () (ValueType& c, const ValueType& a, const ValueType& b)
        { return c = a - b; }

        inline ValueType& operator () (ValueType& c, const ValueType& a)
        { return c -= a; }

        inline ValueType operator () (const ValueType& a)
        { return -a; }
    };

    struct scalar_multiplication
    {
        explicit scalar_multiplication(const ValueType scalar = 1)
            : s(scalar) { }

        inline ValueType& operator () (ValueType& c, const ValueType& a)
        { return c = a * s; }

        inline ValueType operator () (const ValueType& a)
        { return a * s; }

        inline operator const ValueType& ()
        { return s; }

        const ValueType s;
    };

    // element_op<Op>(C,A,B) calculates C = A <Op> B
    template <class Op>
    static swappable_block_matrix_type&
    element_op(swappable_block_matrix_type& C,
               const swappable_block_matrix_type& A,
               const swappable_block_matrix_type& B, Op op = Op())
    {
        for (size_type row = 0; row < C.get_height(); ++row)
            for (size_type col = 0; col < C.get_width(); ++col)
                element_op_swappable_block(
                    C(row, col), C.is_transposed(), C.bs,
                    A(row, col), A.is_transposed(), A.bs,
                    B(row, col), B.is_transposed(), B.bs, op);
        return C;
    }

    // element_op<Op>(C,A) calculates C <Op>= A
    template <class Op>
    static swappable_block_matrix_type&
    element_op(swappable_block_matrix_type& C,
               const swappable_block_matrix_type& A, Op op = Op())
    {
        for (size_type row = 0; row < C.get_height(); ++row)
            for (size_type col = 0; col < C.get_width(); ++col)
                element_op_swappable_block(
                    C(row, col), C.is_transposed(), C.bs,
                    A(row, col), A.is_transposed(), A.bs, op);
        return C;
    }

    // element_op<Op>(C) calculates C = <Op>C
    template <class Op>
    static swappable_block_matrix_type&
    element_op(swappable_block_matrix_type& C, Op op = Op())
    {
        for (size_type row = 0; row < C.get_height(); ++row)
            for (size_type col = 0; col < C.get_width(); ++col)
                element_op_swappable_block(
                    C(row, col), C.bs, op);
        return C;
    }

    // calculates c = a <Op> b
    template <class Op>
    static void element_op_swappable_block(
        const swappable_block_identifier_type c, const bool c_is_transposed, block_scheduler_type& bs_c,
        const swappable_block_identifier_type a, bool a_is_transposed, block_scheduler_type& bs_a,
        const swappable_block_identifier_type b, bool b_is_transposed, block_scheduler_type& bs_b, Op op = Op())
    {
        if (! bs_c.is_simulating())
            ++matrix_operation_statistic::get_instance()->block_addition_calls;
        // check if zero-block (== ! initialized)
        if (! bs_a.is_initialized(a) && ! bs_b.is_initialized(b))
        {
            // => a and b are zero -> set c zero
            bs_c.deinitialize(c);
            if (! bs_c.is_simulating())
                ++matrix_operation_statistic::get_instance()->block_additions_saved_through_zero;
            return;
        }
        a_is_transposed = a_is_transposed != c_is_transposed;
        b_is_transposed = b_is_transposed != c_is_transposed;
        if (! bs_a.is_initialized(a))
        {
            // a is zero -> copy b
            internal_block_type& ic = bs_c.acquire(c, true),
                               & ib = bs_b.acquire(b);
            if (! bs_c.is_simulating())
            {
                if (b_is_transposed)
                    low_level_matrix_binary_ass_op<ValueType, BlockSideLength, false, true, Op>(&ic[0], 0, &ib[0], op);
                else
                    low_level_matrix_binary_ass_op<ValueType, BlockSideLength, false, false, Op>(&ic[0], 0, &ib[0], op);
            }
            bs_b.release(b, false);
            bs_c.release(c, true);
        }
        else if (! bs_b.is_initialized(b))
        {
            // b is zero -> copy a
            internal_block_type& ic = bs_c.acquire(c, true),
                               & ia = bs_a.acquire(a);
            if (! bs_c.is_simulating())
            {
                if (a_is_transposed)
                    low_level_matrix_binary_ass_op<ValueType, BlockSideLength, true, false, Op>(&ic[0], &ia[0], 0, op);
                else
                    low_level_matrix_binary_ass_op<ValueType, BlockSideLength, false, false, Op>(&ic[0], &ia[0], 0, op);
            }
            bs_a.release(a, false);
            bs_c.release(c, true);
        }
        else
        {
            internal_block_type& ic = bs_c.acquire(c, true),
                               & ia = bs_a.acquire(a),
                               & ib = bs_b.acquire(b);
            if (! bs_c.is_simulating())
            {
                if (a_is_transposed)
                {
                    if (b_is_transposed)
                        low_level_matrix_binary_ass_op<ValueType, BlockSideLength, true, true, Op>(&ic[0], &ia[0], &ib[0], op);
                    else
                        low_level_matrix_binary_ass_op<ValueType, BlockSideLength, true, false, Op>(&ic[0], &ia[0], &ib[0], op);
                }
                else
                {
                    if (b_is_transposed)
                        low_level_matrix_binary_ass_op<ValueType, BlockSideLength, false, true, Op>(&ic[0], &ia[0], &ib[0], op);
                    else
                        low_level_matrix_binary_ass_op<ValueType, BlockSideLength, false, false, Op>(&ic[0], &ia[0], &ib[0], op);
                }
            }
            bs_a.release(a, false);
            bs_b.release(b, false);
            bs_c.release(c, true);
        }
    }
bs_c.is_simulating()) { if (c_is_zero) { if (c_is_transposed == a_is_transposed) low_level_matrix_unary_op<ValueType, BlockSideLength, false, Op>(&ic[0], &ia[0], op); else low_level_matrix_unary_op<ValueType, BlockSideLength, true, Op>(&ic[0], &ia[0], op); } else { if (c_is_transposed == a_is_transposed) low_level_matrix_unary_ass_op<ValueType, BlockSideLength, false, Op>(&ic[0], &ia[0], op); else low_level_matrix_unary_ass_op<ValueType, BlockSideLength, true, Op>(&ic[0], &ia[0], op); } } // release bs_c.release(c, true); bs_a.release(a, false); } // calculates c = <op>c template <class Op> static void element_op_swappable_block( const swappable_block_identifier_type c, block_scheduler_type& bs_c, Op op = Op()) { if (! bs_c.is_simulating()) ++matrix_operation_statistic::get_instance()->block_addition_calls; // check if zero-block (== ! initialized) if (! bs_c.is_initialized(c)) { // => c is zero => nothing to do if (! bs_c.is_simulating()) ++matrix_operation_statistic::get_instance()->block_additions_saved_through_zero; return; } // acquire internal_block_type& ic = bs_c.acquire(c); // add if (! bs_c.is_simulating()) low_level_matrix_unary_op<ValueType, BlockSideLength, false, Op>(&ic[0], &ic[0], op); // release bs_c.release(c, true); } // additions for strassen-winograd inline static void strassen_winograd_preaddition_a(swappable_block_matrix_type& a11, swappable_block_matrix_type& a12, swappable_block_matrix_type& a21, swappable_block_matrix_type& a22, swappable_block_matrix_type& s1, swappable_block_matrix_type& s2, swappable_block_matrix_type& s3, swappable_block_matrix_type& s4) { for (size_type row = 0; row < a11.get_height(); ++row) for (size_type col = 0; col < a11.get_width(); ++col) { op_swappable_block_nontransposed(s3, a11, subtraction(), a21, row, col); op_swappable_block_nontransposed(s1, a21, addition(), a22, row, col); op_swappable_block_nontransposed(s2, s1, subtraction(), a11, row, col); op_swappable_block_nontransposed(s4, a12, subtraction(), s2, row, col); } } inline static void strassen_winograd_preaddition_b(swappable_block_matrix_type& b11, swappable_block_matrix_type& b12, swappable_block_matrix_type& b21, swappable_block_matrix_type& b22, swappable_block_matrix_type& t1, swappable_block_matrix_type& t2, swappable_block_matrix_type& t3, swappable_block_matrix_type& t4) { for (size_type row = 0; row < b11.get_height(); ++row) for (size_type col = 0; col < b11.get_width(); ++col) { op_swappable_block_nontransposed(t3, b22, subtraction(), b12, row, col); op_swappable_block_nontransposed(t1, b12, subtraction(), b11, row, col); op_swappable_block_nontransposed(t2, b22, subtraction(), t1, row, col); op_swappable_block_nontransposed(t4, b21, subtraction(), t2, row, col); } } inline static void strassen_winograd_postaddition(swappable_block_matrix_type& c11, // = p2 swappable_block_matrix_type& c12, // = p6 swappable_block_matrix_type& c21, // = p7 swappable_block_matrix_type& c22, // = p4 swappable_block_matrix_type& p1, swappable_block_matrix_type& p3, swappable_block_matrix_type& p5) { for (size_type row = 0; row < c11.get_height(); ++row) for (size_type col = 0; col < c11.get_width(); ++col) { op_swappable_block_nontransposed(c11, addition(), p1, row, col); // (u1) op_swappable_block_nontransposed(p1, addition(), c22, row, col); // (u2) op_swappable_block_nontransposed(p5, addition(), p1, row, col); // (u3) op_swappable_block_nontransposed(c21, addition(), p5, row, col); // (u4) op_swappable_block_nontransposed(c22, p5, addition(), p3, row, col); // (u5) 
op_swappable_block_nontransposed(p1, addition(), p3, row, col); // (u6) op_swappable_block_nontransposed(c12, addition(), p1, row, col); // (u7) } } // calculates c1 += a; c2 += a template <class Op> inline static void element_op_twice_nontransposed(swappable_block_matrix_type& c1, swappable_block_matrix_type& c2, const swappable_block_matrix_type& a, Op op = Op()) { for (size_type row = 0; row < a.get_height(); ++row) for (size_type col = 0; col < a.get_width(); ++col) { element_op_swappable_block( c1(row, col), false, c1.bs, a(row, col), false, a.bs, op); element_op_swappable_block( c2(row, col), false, c2.bs, a(row, col), false, a.bs, op); } } template <class Op> inline static void op_swappable_block_nontransposed(swappable_block_matrix_type& c, swappable_block_matrix_type& a, Op op, swappable_block_matrix_type& b, size_type& row, size_type& col) { element_op_swappable_block( c(row, col), false, c.bs, a(row, col), false, a.bs, b(row, col), false, b.bs, op); } template <class Op> inline static void op_swappable_block_nontransposed(swappable_block_matrix_type& c, Op op, swappable_block_matrix_type& a, size_type& row, size_type& col) { element_op_swappable_block( c(row, col), false, c.bs, a(row, col), false, a.bs, op); } // +-+ end addition +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ // +-+-+-+ matrix multiplication +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ /* n, m and l denote the three dimensions of a matrix multiplication, according to the following ascii-art diagram: * * +--m--+ * +----l-----+ | | +--m--+ * | | | | | | * n A | • l B | = n C | * | | | | | | * +----------+ | | +-----+ * +-----+ * * The index-variables are called i, j, k for dimension * n, m, l . */ // requires height and width divisible by 2 struct swappable_block_matrix_quarterer { swappable_block_matrix_type upleft, upright, downleft, downright, & ul, & ur, & dl, & dr; explicit swappable_block_matrix_quarterer(const swappable_block_matrix_type & whole) : upleft (whole, whole.get_height()/2, whole.get_width()/2, 0, 0), // NOLINT upright (whole, whole.get_height()/2, whole.get_width()/2, 0, whole.get_width()/2), // NOLINT downleft (whole, whole.get_height()/2, whole.get_width()/2, whole.get_height()/2, 0), // NOLINT downright(whole, whole.get_height()/2, whole.get_width()/2, whole.get_height()/2, whole.get_width()/2), ul(upleft), ur(upright), dl(downleft), dr(downright) { assert(! 
(whole.get_height() % 2 | whole.get_width() % 2));
        }
    };

    struct swappable_block_matrix_padding_quarterer
    {
        swappable_block_matrix_type upleft, upright, downleft, downright,
        & ul, & ur, & dl, & dr;

        explicit swappable_block_matrix_padding_quarterer(const swappable_block_matrix_type& whole)
            : upleft   (whole, foxxll::div_ceil(whole.get_height(), 2), foxxll::div_ceil(whole.get_width(), 2), 0, 0), // NOLINT
              upright  (whole, foxxll::div_ceil(whole.get_height(), 2), foxxll::div_ceil(whole.get_width(), 2), 0, foxxll::div_ceil(whole.get_width(), 2)), // NOLINT
              downleft (whole, foxxll::div_ceil(whole.get_height(), 2), foxxll::div_ceil(whole.get_width(), 2), foxxll::div_ceil(whole.get_height(), 2), 0), // NOLINT
              downright(whole, foxxll::div_ceil(whole.get_height(), 2), foxxll::div_ceil(whole.get_width(), 2), foxxll::div_ceil(whole.get_height(), 2), foxxll::div_ceil(whole.get_width(), 2)), // NOLINT
              ul(upleft), ur(upright), dl(downleft), dr(downright)
        { }
    };

    struct swappable_block_matrix_approximative_quarterer
    {
        swappable_block_matrix_type upleft, upright, downleft, downright,
        & ul, & ur, & dl, & dr;

        explicit swappable_block_matrix_approximative_quarterer(const swappable_block_matrix_type& whole)
            : upleft   (whole, whole.get_height()/2, whole.get_width()/2, 0, 0), // NOLINT
              upright  (whole, whole.get_height()/2, whole.get_width() - whole.get_width()/2, 0, whole.get_width()/2), // NOLINT
              downleft (whole, whole.get_height() - whole.get_height()/2, whole.get_width()/2, whole.get_height()/2, 0), // NOLINT
              downright(whole, whole.get_height() - whole.get_height()/2, whole.get_width() - whole.get_width()/2, whole.get_height()/2, whole.get_width()/2),
              ul(upleft), ur(upright), dl(downleft), dr(downright)
        { }
    };

    //! calculates C = A * B + C
    // requires fitting dimensions
    static swappable_block_matrix_type&
    multi_level_strassen_winograd_multiply_and_add_block_grained(const swappable_block_matrix_type& A,
                                                                 const swappable_block_matrix_type& B,
                                                                 swappable_block_matrix_type& C)
    {
        size_t num_levels = tlx::integer_log2_ceil(std::min(A.get_width(),
                                                            std::min(C.get_width(), C.get_height())));
        if (num_levels > STXXL_MATRIX_MULTI_LEVEL_STRASSEN_WINOGRAD_BASE_CASE)
        {
            if (num_levels > STXXL_MATRIX_MULTI_LEVEL_STRASSEN_WINOGRAD_MAX_NUM_LEVELS)
                num_levels = STXXL_MATRIX_MULTI_LEVEL_STRASSEN_WINOGRAD_MAX_NUM_LEVELS;
            swappable_block_matrix_type padded_a(A, foxxll::round_up_to_power_of_two(A.get_height(), num_levels),
                                                 foxxll::round_up_to_power_of_two(A.get_width(), num_levels), 0, 0),
            padded_b(B, foxxll::round_up_to_power_of_two(B.get_height(), num_levels),
                     foxxll::round_up_to_power_of_two(B.get_width(), num_levels), 0, 0),
            padded_c(C, foxxll::round_up_to_power_of_two(C.get_height(), num_levels),
                     foxxll::round_up_to_power_of_two(C.get_width(), num_levels), 0, 0);
            switch (num_levels)
            {
#if (STXXL_MATRIX_MULTI_LEVEL_STRASSEN_WINOGRAD_MAX_NUM_LEVELS >= 5 && 5 > STXXL_MATRIX_MULTI_LEVEL_STRASSEN_WINOGRAD_BASE_CASE)
            case 5:
                use_feedable_sw_block_grained<5>(padded_a, padded_b, padded_c);
                break;
#endif
#if (STXXL_MATRIX_MULTI_LEVEL_STRASSEN_WINOGRAD_MAX_NUM_LEVELS >= 4 && 4 > STXXL_MATRIX_MULTI_LEVEL_STRASSEN_WINOGRAD_BASE_CASE)
            case 4:
                use_feedable_sw_block_grained<4>(padded_a, padded_b, padded_c);
                break;
#endif
#if (STXXL_MATRIX_MULTI_LEVEL_STRASSEN_WINOGRAD_MAX_NUM_LEVELS >= 3 && 3 > STXXL_MATRIX_MULTI_LEVEL_STRASSEN_WINOGRAD_BASE_CASE)
            case 3:
                use_feedable_sw_block_grained<3>(padded_a, padded_b, padded_c);
                break;
#endif
#if (STXXL_MATRIX_MULTI_LEVEL_STRASSEN_WINOGRAD_MAX_NUM_LEVELS >= 2 && 2 > STXXL_MATRIX_MULTI_LEVEL_STRASSEN_WINOGRAD_BASE_CASE)
            case 2:
                use_feedable_sw_block_grained<2>(padded_a, padded_b, padded_c);
                break;
#endif
            default: // only here in case of wrong bounds
                strassen_winograd_multiply_and_add_interleaved(A, B, C);
                break;
            }
        }
        else
            // base case
            strassen_winograd_multiply_and_add_interleaved(A, B, C);
        return C;
    }

    // input matrices have to be padded
    template <unsigned Level>
    static void use_feedable_sw_block_grained(const swappable_block_matrix_type& A,
                                              const swappable_block_matrix_type& B,
                                              swappable_block_matrix_type& C)
    {
        const unsigned granularity = 1;
        feedable_strassen_winograd_block_grained<ValueType, BlockSideLength, Level, true, true>
        fsw(A, 0, 0, C.bs, C.get_height(), C.get_width(), A.get_width(), B, 0, 0);
        // preadditions for A
        {
            matrix_to_quadtree_block_grained<ValueType, BlockSideLength, Level, granularity>
            mtq_a(A);
            for (size_type row = 0; row < mtq_a.get_height(); ++row)
                for (size_type col = 0; col < mtq_a.get_width(); ++col)
                    fsw.feed_a(row, col, mtq_a(row, col));
        }
        // preadditions for B
        {
            matrix_to_quadtree_block_grained<ValueType, BlockSideLength, Level, granularity>
            mtq_b(B);
            for (size_type row = 0; row < mtq_b.get_height(); ++row)
                for (size_type col = 0; col < mtq_b.get_width(); ++col)
                    fsw.feed_b(row, col, mtq_b(row, col));
        }
        // recursive multiplications
        fsw.multiply();
        // postadditions
        {
            matrix_to_quadtree_block_grained<ValueType, BlockSideLength, Level, granularity>
            mtq_c(C);
            for (size_type row = 0; row < mtq_c.get_height(); ++row)
                for (size_type col = 0; col < mtq_c.get_width(); ++col)
                    fsw.read_and_add(row, col, mtq_c(row, col));
        }
    }

    //! calculates C = A * B + C
    // requires fitting dimensions
    static swappable_block_matrix_type&
    multi_level_strassen_winograd_multiply_and_add(const swappable_block_matrix_type& A,
                                                   const swappable_block_matrix_type& B,
                                                   swappable_block_matrix_type& C)
    {
        size_t p = tlx::integer_log2_ceil(std::min(A.get_width(),
                                                   std::min(C.get_width(), C.get_height())));
        swappable_block_matrix_type padded_a(A, foxxll::round_up_to_power_of_two(A.get_height(), p),
                                             foxxll::round_up_to_power_of_two(A.get_width(), p), 0, 0),
        padded_b(B, foxxll::round_up_to_power_of_two(B.get_height(), p),
                 foxxll::round_up_to_power_of_two(B.get_width(), p), 0, 0),
        padded_c(C, foxxll::round_up_to_power_of_two(C.get_height(), p),
                 foxxll::round_up_to_power_of_two(C.get_width(), p), 0, 0);
        choose_level_for_feedable_sw(padded_a, padded_b, padded_c);
        return C;
    }

    // input matrices have to be padded
    static void choose_level_for_feedable_sw(const swappable_block_matrix_type& A,
                                             const swappable_block_matrix_type& B,
                                             swappable_block_matrix_type& C)
    {
        switch (tlx::integer_log2_ceil(std::min(A.get_width(),
                                                std::min(C.get_width(), C.get_height()))))
        {
        default:
            /* use_feedable_sw<4>(A, B, C);
            break;
        case 3:
            use_feedable_sw<3>(A, B, C);
            break;
        case 2:*/
            use_feedable_sw<2>(A, B, C);
            break;
        case 1:
            /*use_feedable_sw<1>(A, B, C);
            break;*/
        case 0:
            // base case
            recursive_multiply_and_add(A, B, C);
            break;
        }
    }

    // input matrices have to be padded
    template <unsigned Level>
    static void use_feedable_sw(const swappable_block_matrix_type& A,
                                const swappable_block_matrix_type& B,
                                swappable_block_matrix_type& C)
    {
        feedable_strassen_winograd<ValueType, BlockSideLength, Level, true, true>
        fsw(A, 0, 0, C.bs, C.get_height(), C.get_width(), A.get_width(), B, 0, 0);
        // preadditions for A
        matrix_to_quadtree<ValueType, BlockSideLength, Level>
        mtq_a(A);
        for (size_type block_row = 0; block_row < mtq_a.get_height_in_blocks(); ++block_row)
            for (size_type block_col = 0; block_col < mtq_a.get_width_in_blocks(); ++block_col)
            {
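                /* Each block of A is streamed element-wise into the quadtree
                   feeder; the per-element loop below is the unit that gets
                   parallelized. */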
                fsw.begin_feeding_a_block(block_row, block_col,
                                          mtq_a.begin_reading_block(block_row, block_col));
#if STXXL_PARALLEL
#pragma omp parallel for
#endif
                for (int64_t element_row_in_block = 0; element_row_in_block < BlockSideLength; ++element_row_in_block)
                    for (unsigned int element_col_in_block = 0; element_col_in_block < BlockSideLength; ++element_col_in_block)
                        fsw.feed_a_element(element_row_in_block * BlockSideLength + element_col_in_block,
                                           mtq_a.read_element(element_row_in_block * BlockSideLength + element_col_in_block));
                fsw.end_feeding_a_block(block_row, block_col,
                                        mtq_a.end_reading_block(block_row, block_col));
            }
        // preadditions for B
        matrix_to_quadtree<ValueType, BlockSideLength, Level>
        mtq_b(B);
        for (size_type block_row = 0; block_row < mtq_b.get_height_in_blocks(); ++block_row)
            for (size_type block_col = 0; block_col < mtq_b.get_width_in_blocks(); ++block_col)
            {
                fsw.begin_feeding_b_block(block_row, block_col,
                                          mtq_b.begin_reading_block(block_row, block_col));
#if STXXL_PARALLEL
#pragma omp parallel for
#endif
                for (int64_t element_row_in_block = 0; element_row_in_block < BlockSideLength; ++element_row_in_block)
                    for (unsigned int element_col_in_block = 0; element_col_in_block < BlockSideLength; ++element_col_in_block)
                        fsw.feed_b_element(element_row_in_block * BlockSideLength + element_col_in_block,
                                           mtq_b.read_element(element_row_in_block * BlockSideLength + element_col_in_block));
                fsw.end_feeding_b_block(block_row, block_col,
                                        mtq_b.end_reading_block(block_row, block_col));
            }
        // recursive multiplications
        fsw.multiply();
        // postadditions
        matrix_to_quadtree<ValueType, BlockSideLength, Level>
        mtq_c(C);
        for (size_type block_row = 0; block_row < mtq_c.get_height_in_blocks(); ++block_row)
            for (size_type block_col = 0; block_col < mtq_c.get_width_in_blocks(); ++block_col)
            {
                mtq_c.begin_feeding_block(block_row, block_col,
                                          fsw.begin_reading_block(block_row, block_col));
#if STXXL_PARALLEL
#pragma omp parallel for
#endif
                for (int64_t element_row_in_block = 0; element_row_in_block < BlockSideLength; ++element_row_in_block)
                    for (unsigned int element_col_in_block = 0; element_col_in_block < BlockSideLength; ++element_col_in_block)
                        mtq_c.feed_and_add_element(element_row_in_block * BlockSideLength + element_col_in_block,
                                                   fsw.read_element(element_row_in_block * BlockSideLength + element_col_in_block));
                mtq_c.end_feeding_block(block_row, block_col,
                                        fsw.end_reading_block(block_row, block_col));
            }
    }

    //!
calculates C = A * B // assumes fitting dimensions static swappable_block_matrix_type& strassen_winograd_multiply(const swappable_block_matrix_type& A, const swappable_block_matrix_type& B, swappable_block_matrix_type& C) { // base case if (C.get_height() <= strassen_winograd_base_case_size || C.get_width() <= strassen_winograd_base_case_size || A.get_width() <= strassen_winograd_base_case_size) { C.set_zero(); return recursive_multiply_and_add(A, B, C); } // partition matrix swappable_block_matrix_padding_quarterer qa(A), qb(B), qc(C); // preadditions swappable_block_matrix_type s1(C.bs, qa.ul.get_height(), qa.ul.get_width(), qa.ul.is_transposed()), s2(C.bs, qa.ul.get_height(), qa.ul.get_width(), qa.ul.is_transposed()), s3(C.bs, qa.ul.get_height(), qa.ul.get_width(), qa.ul.is_transposed()), s4(C.bs, qa.ul.get_height(), qa.ul.get_width(), qa.ul.is_transposed()), t1(C.bs, qb.ul.get_height(), qb.ul.get_width(), qb.ul.is_transposed()), t2(C.bs, qb.ul.get_height(), qb.ul.get_width(), qb.ul.is_transposed()), t3(C.bs, qb.ul.get_height(), qb.ul.get_width(), qb.ul.is_transposed()), t4(C.bs, qb.ul.get_height(), qb.ul.get_width(), qb.ul.is_transposed()); strassen_winograd_preaddition_a(qa.ul, qa.ur, qa.dl, qa.dr, s1, s2, s3, s4); strassen_winograd_preaddition_b(qb.ul, qb.ur, qb.dl, qb.dr, t1, t2, t3, t4); // recursive multiplications swappable_block_matrix_type p1(C.bs, qc.ul.get_height(), qc.ul.get_width(), qc.ul.is_transposed()), // p2 stored in qc.ul p3(C.bs, qc.ul.get_height(), qc.ul.get_width(), qc.ul.is_transposed()), // p4 stored in qc.dr p5(C.bs, qc.ul.get_height(), qc.ul.get_width(), qc.ul.is_transposed()); // p6 stored in qc.ur // p7 stored in qc.dl strassen_winograd_multiply(qa.ul, qb.ul, p1); strassen_winograd_multiply(qa.ur, qb.dl, qc.ul); strassen_winograd_multiply(s1, t1, p3); strassen_winograd_multiply(s2, t2, qc.dr); strassen_winograd_multiply(s3, t3, p5); strassen_winograd_multiply(s4, qb.dr, qc.ur); strassen_winograd_multiply(qa.dr, t4, qc.dl); // postadditions strassen_winograd_postaddition(qc.ul, qc.ur, qc.dl, qc.dr, p1, p3, p5); return C; } //! 
calculates C = A * B + C // assumes fitting dimensions static swappable_block_matrix_type& strassen_winograd_multiply_and_add_interleaved(const swappable_block_matrix_type& A, const swappable_block_matrix_type& B, swappable_block_matrix_type& C) { // base case if (C.get_height() <= strassen_winograd_base_case_size || C.get_width() <= strassen_winograd_base_case_size || A.get_width() <= strassen_winograd_base_case_size) return recursive_multiply_and_add(A, B, C); // partition matrix swappable_block_matrix_padding_quarterer qa(A), qb(B), qc(C); // preadditions swappable_block_matrix_type s1(C.bs, qa.ul.get_height(), qa.ul.get_width(), qa.ul.is_transposed()), s2(C.bs, qa.ul.get_height(), qa.ul.get_width(), qa.ul.is_transposed()), s3(C.bs, qa.ul.get_height(), qa.ul.get_width(), qa.ul.is_transposed()), s4(C.bs, qa.ul.get_height(), qa.ul.get_width(), qa.ul.is_transposed()), t1(C.bs, qb.ul.get_height(), qb.ul.get_width(), qb.ul.is_transposed()), t2(C.bs, qb.ul.get_height(), qb.ul.get_width(), qb.ul.is_transposed()), t3(C.bs, qb.ul.get_height(), qb.ul.get_width(), qb.ul.is_transposed()), t4(C.bs, qb.ul.get_height(), qb.ul.get_width(), qb.ul.is_transposed()); strassen_winograd_preaddition_a(qa.ul, qa.ur, qa.dl, qa.dr, s1, s2, s3, s4); strassen_winograd_preaddition_b(qb.ul, qb.ur, qb.dl, qb.dr, t1, t2, t3, t4); // recursive multiplications and postadditions swappable_block_matrix_type px(C.bs, qc.ul.get_height(), qc.ul.get_width(), qc.ul.is_transposed()); strassen_winograd_multiply_and_add_interleaved(qa.ur, qb.dl, qc.ul); // p2 strassen_winograd_multiply_and_add_interleaved(qa.ul, qb.ul, px); // p1 element_op<addition>(qc.ul, px); strassen_winograd_multiply_and_add_interleaved(s2, t2, px); // p4 s2.set_zero(); t2.set_zero(); element_op<addition>(qc.ur, px); strassen_winograd_multiply_and_add_interleaved(s3, t3, px); // p5 s3.set_zero(); t3.set_zero(); element_op_twice_nontransposed<addition>(qc.dl, qc.dr, px); px.set_zero(); strassen_winograd_multiply_and_add_interleaved(qa.dr, t4, qc.dl); // p7 t4.set_zero(); strassen_winograd_multiply_and_add_interleaved(s1, t1, px); // p3 s1.set_zero(); t1.set_zero(); element_op_twice_nontransposed<addition>(qc.dr, qc.ur, px); px.set_zero(); strassen_winograd_multiply_and_add_interleaved(s4, qb.dr, qc.ur); // p6 return C; } //! 
calculates C = A * B + C // assumes fitting dimensions static swappable_block_matrix_type& strassen_winograd_multiply_and_add(const swappable_block_matrix_type& A, const swappable_block_matrix_type& B, swappable_block_matrix_type& C) { // base case if (C.get_height() <= strassen_winograd_base_case_size || C.get_width() <= strassen_winograd_base_case_size || A.get_width() <= strassen_winograd_base_case_size) return recursive_multiply_and_add(A, B, C); // partition matrix swappable_block_matrix_padding_quarterer qa(A), qb(B), qc(C); // preadditions swappable_block_matrix_type s1(C.bs, qa.ul.get_height(), qa.ul.get_width()), s2(C.bs, qa.ul.get_height(), qa.ul.get_width()), s3(C.bs, qa.ul.get_height(), qa.ul.get_width()), s4(C.bs, qa.ul.get_height(), qa.ul.get_width()), t1(C.bs, qb.ul.get_height(), qb.ul.get_width()), t2(C.bs, qb.ul.get_height(), qb.ul.get_width()), t3(C.bs, qb.ul.get_height(), qb.ul.get_width()), t4(C.bs, qb.ul.get_height(), qb.ul.get_width()); element_op<subtraction>(s3, qa.ul, qa.dl); element_op<addition>(s1, qa.dl, qa.dr); element_op<subtraction>(s2, s1, qa.ul); element_op<subtraction>(s4, qa.ur, s2); element_op<subtraction>(t3, qb.dr, qb.ur); element_op<subtraction>(t1, qb.ur, qb.ul); element_op<subtraction>(t2, qb.dr, t1); element_op<subtraction>(t4, qb.dl, t2); // recursive multiplications and postadditions swappable_block_matrix_type px(C.bs, qc.ul.get_height(), qc.ul.get_width()); strassen_winograd_multiply_and_add(qa.ur, qb.dl, qc.ul); // p2 strassen_winograd_multiply_and_add(qa.ul, qb.ul, px); // p1 element_op<addition>(qc.ul, px); strassen_winograd_multiply_and_add(s2, t2, px); // p4 element_op<addition>(qc.ur, px); strassen_winograd_multiply_and_add(s3, t3, px); // p5 element_op<addition>(qc.dl, px); element_op<addition>(qc.dr, px); px.set_zero(); strassen_winograd_multiply_and_add(qa.dr, t4, qc.dl); // p7 strassen_winograd_multiply_and_add(s1, t1, px); // p3 element_op<addition>(qc.dr, px); element_op<addition>(qc.ur, px); strassen_winograd_multiply_and_add(s4, qb.dr, qc.ur); // p6 return C; } //! calculates C = A * B + C // assumes fitting dimensions static swappable_block_matrix_type& recursive_multiply_and_add(const swappable_block_matrix_type& A, const swappable_block_matrix_type& B, swappable_block_matrix_type& C) { // catch empty intervals if (C.get_height() * C.get_width() * A.get_width() == 0) return C; // base case if ((C.get_height() == 1) + (C.get_width() == 1) + (A.get_width() == 1) >= 2) return naive_multiply_and_add(A, B, C); // partition matrix swappable_block_matrix_approximative_quarterer qa(A), qb(B), qc(C); // recursive multiplication // The order of recursive calls is optimized to enhance locality. C has priority because it has to be read and written. recursive_multiply_and_add(qa.ul, qb.ul, qc.ul); recursive_multiply_and_add(qa.ur, qb.dl, qc.ul); recursive_multiply_and_add(qa.ur, qb.dr, qc.ur); recursive_multiply_and_add(qa.ul, qb.ur, qc.ur); recursive_multiply_and_add(qa.dl, qb.ur, qc.dr); recursive_multiply_and_add(qa.dr, qb.dr, qc.dr); recursive_multiply_and_add(qa.dr, qb.dl, qc.dl); recursive_multiply_and_add(qa.dl, qb.ul, qc.dl); return C; } //! 
calculates C = A * B + C // requires fitting dimensions static swappable_block_matrix_type& naive_multiply_and_add(const swappable_block_matrix_type& A, const swappable_block_matrix_type& B, swappable_block_matrix_type& C) { const size_type& n = C.get_height(), & m = C.get_width(), & l = A.get_width(); for (size_type i = 0; i < n; ++i) for (size_type j = 0; j < m; ++j) for (size_type k = 0; k < l; ++k) multiply_and_add_swappable_block(A(i, k), A.is_transposed(), A.bs, B(k, j), B.is_transposed(), B.bs, C(i, j), C.is_transposed(), C.bs); return C; } static void multiply_and_add_swappable_block( const swappable_block_identifier_type a, const bool a_is_transposed, block_scheduler_type& bs_a, const swappable_block_identifier_type b, const bool b_is_transposed, block_scheduler_type& bs_b, const swappable_block_identifier_type c, const bool c_is_transposed, block_scheduler_type& bs_c) { if (! bs_c.is_simulating()) ++matrix_operation_statistic::get_instance()->block_multiplication_calls; // check if zero-block (== ! initialized) if (! bs_a.is_initialized(a) || ! bs_b.is_initialized(b)) { // => one factor is zero => product is zero if (! bs_c.is_simulating()) ++matrix_operation_statistic::get_instance()->block_multiplications_saved_through_zero; return; } // acquire ValueType* ap = bs_a.acquire(a).begin(), * bp = bs_b.acquire(b).begin(), * cp = bs_c.acquire(c).begin(); // multiply if (! bs_c.is_simulating()) low_level_matrix_multiply_and_add<ValueType, BlockSideLength> (ap, a_is_transposed, bp, b_is_transposed, cp, c_is_transposed); // release bs_a.release(a, false); bs_b.release(b, false); bs_c.release(c, true); } // +-+ end matrix multiplication +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ // +-+-+-+ matrix-vector multiplication +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ //! calculates z = A * x static column_vector_type& recursive_matrix_col_vector_multiply_and_add(const swappable_block_matrix_type& A, const column_vector_type& x, column_vector_type& z, const vector_size_type offset_x = 0, const vector_size_type offset_z = 0) { // catch empty intervals if (A.get_height() * A.get_width() == 0) return z; // base case if (A.get_height() == 1 || A.get_width() == 1) return naive_matrix_col_vector_multiply_and_add(A, x, z, offset_x, offset_z); // partition matrix swappable_block_matrix_approximative_quarterer qa(A); // recursive multiplication // The order of recursive calls is optimized to enhance locality. 
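        /* Quadrant order ul -> ur -> dr -> dl: consecutive calls share either
           the x-segment (column offset) or the z-segment (row offset), so the
           vector pieces just touched stay warm. */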
recursive_matrix_col_vector_multiply_and_add(qa.ul, x, z, offset_x, offset_z); recursive_matrix_col_vector_multiply_and_add(qa.ur, x, z, offset_x + qa.ul.get_width(), offset_z); recursive_matrix_col_vector_multiply_and_add(qa.dr, x, z, offset_x + qa.ul.get_width(), offset_z + qa.ul.get_height()); recursive_matrix_col_vector_multiply_and_add(qa.dl, x, z, offset_x, offset_z + qa.ul.get_height()); return z; } static column_vector_type& naive_matrix_col_vector_multiply_and_add(const swappable_block_matrix_type& A, const column_vector_type& x, column_vector_type& z, const vector_size_type offset_x = 0, const vector_size_type offset_z = 0) { for (size_type row = 0; row < A.get_height(); ++row) for (size_type col = 0; col < A.get_width(); ++col) matrix_col_vector_multiply_and_add_swappable_block(A(row, col), A.is_transposed(), A.bs, x, z, (offset_x + col) * BlockSideLength, (offset_z + row) * BlockSideLength); return z; } static void matrix_col_vector_multiply_and_add_swappable_block( const swappable_block_identifier_type a, const bool a_is_transposed, block_scheduler_type& bs_a, const column_vector_type& x, column_vector_type& z, const vector_size_type offset_x = 0, const vector_size_type offset_z = 0) { // check if zero-block (== ! initialized) if (! bs_a.is_initialized(a)) { // => matrix is zero => product is zero return; } // acquire internal_block_type& ia = bs_a.acquire(a); // multiply if (! bs_a.is_simulating()) { unsigned row_limit = std::min(BlockSideLength, unsigned(z.size() - offset_z)), col_limit = std::min(BlockSideLength, unsigned(x.size() - offset_x)); if (a_is_transposed) for (unsigned col = 0; col < col_limit; ++col) for (unsigned row = 0; row < row_limit; ++row) z[offset_z + row] += x[offset_x + col] * ia[row + col * BlockSideLength]; else for (unsigned row = 0; row < row_limit; ++row) for (unsigned col = 0; col < col_limit; ++col) z[offset_z + row] += x[offset_x + col] * ia[row * BlockSideLength + col]; } // release bs_a.release(a, false); } //! calculates z = y * A static row_vector_type& recursive_matrix_row_vector_multiply_and_add(const row_vector_type& y, const swappable_block_matrix_type& A, row_vector_type& z, const vector_size_type offset_y = 0, const vector_size_type offset_z = 0) { // catch empty intervals if (A.get_height() * A.get_width() == 0) return z; // base case if (A.get_height() == 1 || A.get_width() == 1) return naive_matrix_row_vector_multiply_and_add(y, A, z, offset_y, offset_z); // partition matrix swappable_block_matrix_approximative_quarterer qa(A); // recursive multiplication // The order of recursive calls is optimized to enhance locality. 
recursive_matrix_row_vector_multiply_and_add(y, qa.ul, z, offset_y, offset_z); recursive_matrix_row_vector_multiply_and_add(y, qa.dl, z, offset_y + qa.ul.get_height(), offset_z); recursive_matrix_row_vector_multiply_and_add(y, qa.dr, z, offset_y + qa.ul.get_height(), offset_z + qa.ul.get_width()); recursive_matrix_row_vector_multiply_and_add(y, qa.ur, z, offset_y, offset_z + qa.ul.get_width()); return z; } static row_vector_type& naive_matrix_row_vector_multiply_and_add(const row_vector_type& y, const swappable_block_matrix_type& A, row_vector_type& z, const vector_size_type offset_y = 0, const vector_size_type offset_z = 0) { for (size_type row = 0; row < A.get_height(); ++row) for (size_type col = 0; col < A.get_width(); ++col) matrix_row_vector_multiply_and_add_swappable_block(y, A(row, col), A.is_transposed(), A.bs, z, (offset_y + row) * BlockSideLength, (offset_z + col) * BlockSideLength); return z; } static void matrix_row_vector_multiply_and_add_swappable_block(const row_vector_type& y, const swappable_block_identifier_type a, const bool a_is_transposed, block_scheduler_type& bs_a, row_vector_type& z, const vector_size_type offset_y = 0, const vector_size_type offset_z = 0) { // check if zero-block (== ! initialized) if (! bs_a.is_initialized(a)) { // => matrix is zero => product is zero return; } // acquire internal_block_type& ia = bs_a.acquire(a); // multiply if (! bs_a.is_simulating()) { unsigned row_limit = std::min(BlockSideLength, unsigned(y.size() - offset_y)), col_limit = std::min(BlockSideLength, unsigned(z.size() - offset_z)); if (a_is_transposed) for (unsigned col = 0; col < col_limit; ++col) for (unsigned row = 0; row < row_limit; ++row) z[offset_z + col] += ia[row + col * BlockSideLength] * y[offset_y + row]; else for (unsigned row = 0; row < row_limit; ++row) for (unsigned col = 0; col < col_limit; ++col) z[offset_z + col] += ia[row * BlockSideLength + col] * y[offset_y + row]; } // release bs_a.release(a, false); } // +-+ end matrix-vector multiplication +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ // +-+-+-+ vector-vector multiplication +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ static void recursive_matrix_from_vectors(swappable_block_matrix_type A, const column_vector_type& l, const row_vector_type& r, vector_size_type offset_l = 0, vector_size_type offset_r = 0) { // catch empty intervals if (A.get_height() * A.get_width() == 0) return; // base case if (A.get_height() == 1 || A.get_width() == 1) { naive_matrix_from_vectors(A, l, r, offset_l, offset_r); return; } // partition matrix swappable_block_matrix_approximative_quarterer qa(A); // recursive creation // The order of recursive calls is optimized to enhance locality. 
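        /* The outer product A = l * r is materialized quadrant by quadrant
           (ul -> ur -> dr -> dl); consecutive calls reuse either the l- or
           the r-segment just touched. */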
recursive_matrix_from_vectors(qa.ul, l, r, offset_l, offset_r); recursive_matrix_from_vectors(qa.ur, l, r, offset_l, offset_r + qa.ul.get_width()); recursive_matrix_from_vectors(qa.dr, l, r, offset_l + qa.ul.get_height(), offset_r + qa.ul.get_width()); recursive_matrix_from_vectors(qa.dl, l, r, offset_l + qa.ul.get_height(), offset_r); } static void naive_matrix_from_vectors(swappable_block_matrix_type A, const column_vector_type& l, const row_vector_type& r, vector_size_type offset_l = 0, vector_size_type offset_r = 0) { for (size_type row = 0; row < A.get_height(); ++row) for (size_type col = 0; col < A.get_width(); ++col) matrix_from_vectors_swappable_block(A(row, col), A.is_transposed(), A.bs, l, r, (offset_l + row) * BlockSideLength, (offset_r + col) * BlockSideLength); } static void matrix_from_vectors_swappable_block(swappable_block_identifier_type a, const bool a_is_transposed, block_scheduler_type& bs_a, const column_vector_type& l, const row_vector_type& r, vector_size_type offset_l, vector_size_type offset_r) { // acquire internal_block_type& ia = bs_a.acquire(a, true); // multiply if (! bs_a.is_simulating()) { unsigned row_limit = std::min(BlockSideLength, unsigned(l.size() - offset_l)), col_limit = std::min(BlockSideLength, unsigned(r.size() - offset_r)); if (a_is_transposed) for (unsigned col = 0; col < col_limit; ++col) for (unsigned row = 0; row < row_limit; ++row) ia[row + col * BlockSideLength] = l[row + offset_l] * r[col + offset_r]; else for (unsigned row = 0; row < row_limit; ++row) for (unsigned col = 0; col < col_limit; ++col) ia[row * BlockSideLength + col] = l[row + offset_l] * r[col + offset_r]; } // release bs_a.release(a, true); } // +-+ end vector-vector multiplication +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ }; // Adjust choose_level_for_feedable_sw, too! template <typename ValueType, unsigned BlockSideLength> const unsigned matrix_operations<ValueType, BlockSideLength>::strassen_winograd_base_case_size = 3; } // namespace matrix_local } // namespace stxxl #endif // !STXXL_CONTAINERS_MATRIX_ARITHMETIC_HEADER
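/* A minimal, in-memory sketch of one Strassen-Winograd step on square
 * power-of-two matrices, mirroring the scheme used by
 * strassen_winograd_multiply above: pre-additions s1..s4 and t1..t4, seven
 * recursive products, and the u-step post-additions. All names below
 * (sw_matrix, sw_multiply, sw_combine, sw_quarter) are illustrative only;
 * the real implementation operates on swappable blocks through the block
 * scheduler, not on std::vector.
 */
#include <cassert>
#include <cstddef>
#include <vector>

namespace strassen_winograd_sketch {

using sw_matrix = std::vector<std::vector<double> >;

// c = a + sign * b, elementwise (square matrices of equal size)
inline sw_matrix sw_combine(const sw_matrix& a, const sw_matrix& b, double sign)
{
    sw_matrix c(a.size(), std::vector<double>(a.size()));
    for (size_t i = 0; i < a.size(); ++i)
        for (size_t j = 0; j < a.size(); ++j)
            c[i][j] = a[i][j] + sign * b[i][j];
    return c;
}

// extract the n/2 x n/2 quadrant whose top-left corner is (row0, col0)
inline sw_matrix sw_quarter(const sw_matrix& a, size_t row0, size_t col0)
{
    const size_t h = a.size() / 2;
    sw_matrix q(h, std::vector<double>(h));
    for (size_t i = 0; i < h; ++i)
        for (size_t j = 0; j < h; ++j)
            q[i][j] = a[row0 + i][col0 + j];
    return q;
}

sw_matrix sw_multiply(const sw_matrix& a, const sw_matrix& b)
{
    const size_t n = a.size();
    assert((n & (n - 1)) == 0); // side length must be a power of two
    if (n == 1)
        return sw_matrix(1, std::vector<double>(1, a[0][0] * b[0][0]));
    const size_t h = n / 2;
    sw_matrix a11 = sw_quarter(a, 0, 0), a12 = sw_quarter(a, 0, h),
              a21 = sw_quarter(a, h, 0), a22 = sw_quarter(a, h, h),
              b11 = sw_quarter(b, 0, 0), b12 = sw_quarter(b, 0, h),
              b21 = sw_quarter(b, h, 0), b22 = sw_quarter(b, h, h);
    // pre-additions, matching strassen_winograd_preaddition_a/_b
    sw_matrix s1 = sw_combine(a21, a22, +1.0), s2 = sw_combine(s1, a11, -1.0),
              s3 = sw_combine(a11, a21, -1.0), s4 = sw_combine(a12, s2, -1.0),
              t1 = sw_combine(b12, b11, -1.0), t2 = sw_combine(b22, t1, -1.0),
              t3 = sw_combine(b22, b12, -1.0), t4 = sw_combine(b21, t2, -1.0);
    // the seven recursive products
    sw_matrix p1 = sw_multiply(a11, b11), p2 = sw_multiply(a12, b21),
              p3 = sw_multiply(s1, t1),   p4 = sw_multiply(s2, t2),
              p5 = sw_multiply(s3, t3),   p6 = sw_multiply(s4, b22),
              p7 = sw_multiply(a22, t4);
    // post-additions (u-steps), matching strassen_winograd_postaddition
    sw_matrix u2  = sw_combine(p1, p4, +1.0);                   // u2 = p1 + p4
    sw_matrix u3  = sw_combine(u2, p5, +1.0);                   // u3 = u2 + p5
    sw_matrix c11 = sw_combine(p1, p2, +1.0);                   // c11 = p1 + p2
    sw_matrix c12 = sw_combine(sw_combine(u2, p3, +1.0), p6, +1.0);
    sw_matrix c21 = sw_combine(u3, p7, +1.0);
    sw_matrix c22 = sw_combine(u3, p3, +1.0);
    // assemble the result from the four quadrants
    sw_matrix c(n, std::vector<double>(n));
    for (size_t i = 0; i < h; ++i)
        for (size_t j = 0; j < h; ++j) {
            c[i][j] = c11[i][j];
            c[i][j + h] = c12[i][j];
            c[i + h][j] = c21[i][j];
            c[i + h][j + h] = c22[i][j];
        }
    return c;
}

} // namespace strassen_winograd_sketch

/* Usage note: applied to two 4x4 matrices, sw_multiply agrees with the naive
 * triple loop while performing 7 (rather than 8) recursive block products per
 * level; the external-memory code above trades these savings against block
 * transfers instead of arithmetic. */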
threshold.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT H H RRRR EEEEE SSSSS H H OOO L DDDD % % T H H R R E SS H H O O L D D % % T HHHHH RRRR EEE SSS HHHHH O O L D D % % T H H R R E SS H H O O L D D % % T H H R R EEEEE SSSSS H H OOO LLLLL DDDD % % % % % % MagickCore Image Threshold Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/property.h" #include "MagickCore/blob.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/configure.h" #include "MagickCore/constitute.h" #include "MagickCore/decorate.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/effect.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/montage.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/shear.h" #include "MagickCore/signature-private.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" #include "MagickCore/xml-tree.h" #include "MagickCore/xml-tree-private.h" /* Define declarations. */ #define ThresholdsFilename "thresholds.xml" /* Typedef declarations. */ struct _ThresholdMap { char *map_id, *description; size_t width, height; ssize_t divisor, *levels; }; /* Static declarations. 
*/ static const char *MinimalThresholdMap = "<?xml version=\"1.0\"?>" "<thresholds>" " <threshold map=\"threshold\" alias=\"1x1\">" " <description>Threshold 1x1 (non-dither)</description>" " <levels width=\"1\" height=\"1\" divisor=\"2\">" " 1" " </levels>" " </threshold>" " <threshold map=\"checks\" alias=\"2x1\">" " <description>Checkerboard 2x1 (dither)</description>" " <levels width=\"2\" height=\"2\" divisor=\"3\">" " 1 2" " 2 1" " </levels>" " </threshold>" "</thresholds>"; /* Forward declarations. */ static ThresholdMap *GetThresholdMapFile(const char *,const char *,const char *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveThresholdImage() selects an individual threshold for each pixel % based on the range of intensity values in its local neighborhood. This % allows for thresholding of an image whose global intensity histogram % doesn't contain distinctive peaks. % % The format of the AdaptiveThresholdImage method is: % % Image *AdaptiveThresholdImage(const Image *image,const size_t width, % const size_t height,const double bias,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o width: the width of the local neighborhood. % % o height: the height of the local neighborhood. % % o bias: the mean bias. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AdaptiveThresholdImage(const Image *image, const size_t width,const size_t height,const double bias, ExceptionInfo *exception) { #define AdaptiveThresholdImageTag "AdaptiveThreshold/Image" CacheView *image_view, *threshold_view; Image *threshold_image; MagickBooleanType status; MagickOffsetType progress; MagickSizeType number_pixels; ssize_t y; /* Initialize threshold image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); threshold_image=CloneImage(image,0,0,MagickTrue,exception); if (threshold_image == (Image *) NULL) return((Image *) NULL); status=SetImageStorageClass(threshold_image,DirectClass,exception); if (status == MagickFalse) { threshold_image=DestroyImage(threshold_image); return((Image *) NULL); } /* Threshold image. 
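    The window mean is maintained incrementally as the kernel slides along a
    row: channel_bias accumulates the column about to drop out of the window
    and channel_sum holds the running window total, so each step re-reads one
    column instead of the whole width x height neighborhood.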
*/ status=MagickTrue; progress=0; number_pixels=(MagickSizeType) width*height; image_view=AcquireVirtualCacheView(image,exception); threshold_view=AcquireAuthenticCacheView(threshold_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,threshold_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double channel_bias[MaxPixelChannels], channel_sum[MaxPixelChannels]; register const Quantum *magick_restrict p, *magick_restrict pixels; register Quantum *magick_restrict q; register ssize_t i, x; ssize_t center, u, v; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t) (height/2L),image->columns+width,height,exception); q=QueueCacheViewAuthenticPixels(threshold_view,0,y,threshold_image->columns, 1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } center=(ssize_t) GetPixelChannels(image)*(image->columns+width)*(height/2L)+ GetPixelChannels(image)*(width/2); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image, channel); if ((traits == UndefinedPixelTrait) || (threshold_traits == UndefinedPixelTrait)) continue; if ((threshold_traits & CopyPixelTrait) != 0) { SetPixelChannel(threshold_image,channel,p[center+i],q); continue; } pixels=p; channel_bias[channel]=0.0; channel_sum[channel]=0.0; for (v=0; v < (ssize_t) height; v++) { for (u=0; u < (ssize_t) width; u++) { if (u == (ssize_t) (width-1)) channel_bias[channel]+=pixels[i]; channel_sum[channel]+=pixels[i]; pixels+=GetPixelChannels(image); } pixels+=GetPixelChannels(image)*image->columns; } } for (x=0; x < (ssize_t) image->columns; x++) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double mean; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image, channel); if ((traits == UndefinedPixelTrait) || (threshold_traits == UndefinedPixelTrait)) continue; if ((threshold_traits & CopyPixelTrait) != 0) { SetPixelChannel(threshold_image,channel,p[center+i],q); continue; } channel_sum[channel]-=channel_bias[channel]; channel_bias[channel]=0.0; pixels=p; for (v=0; v < (ssize_t) height; v++) { channel_bias[channel]+=pixels[i]; pixels+=(width-1)*GetPixelChannels(image); channel_sum[channel]+=pixels[i]; pixels+=GetPixelChannels(image)*(image->columns+1); } mean=(double) (channel_sum[channel]/number_pixels+bias); SetPixelChannel(threshold_image,channel,(Quantum) ((double) p[center+i] <= mean ? 
          0 : QuantumRange),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(threshold_image);
    }
    if (SyncCacheViewAuthenticPixels(threshold_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,AdaptiveThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  threshold_image->type=image->type;
  threshold_view=DestroyCacheView(threshold_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    threshold_image=DestroyImage(threshold_image);
  return(threshold_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A u t o T h r e s h o l d I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AutoThresholdImage() automatically performs image thresholding
%  dependent on which method you specify.
%
%  The format of the AutoThresholdImage method is:
%
%      MagickBooleanType AutoThresholdImage(Image *image,
%        const AutoThresholdMethod method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: The image to auto-threshold.
%
%    o method: choose from Kapur, OTSU, or Triangle.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static double KapurThreshold(const Image *image,const double *histogram,
  ExceptionInfo *exception)
{
#define MaxIntensity  255

  double
    *black_entropy,
    *cumulative_histogram,
    entropy,
    epsilon,
    maximum_entropy,
    *white_entropy;

  register ssize_t
    i,
    j;

  size_t
    threshold;

  /*
    Compute optimal threshold from the entropy of the histogram.
  */
  cumulative_histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*cumulative_histogram));
  black_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*black_entropy));
  white_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*white_entropy));
  if ((cumulative_histogram == (double *) NULL) ||
      (black_entropy == (double *) NULL) || (white_entropy == (double *) NULL))
    {
      if (white_entropy != (double *) NULL)
        white_entropy=(double *) RelinquishMagickMemory(white_entropy);
      if (black_entropy != (double *) NULL)
        black_entropy=(double *) RelinquishMagickMemory(black_entropy);
      if (cumulative_histogram != (double *) NULL)
        cumulative_histogram=(double *)
          RelinquishMagickMemory(cumulative_histogram);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(-1.0);
    }
  /*
    Entropy for black and white parts of the histogram.
  */
  cumulative_histogram[0]=histogram[0];
  for (i=1; i <= MaxIntensity; i++)
    cumulative_histogram[i]=cumulative_histogram[i-1]+histogram[i];
  epsilon=MagickMinimumValue;
  for (j=0; j <= MaxIntensity; j++)
  {
    /*
      Black entropy.
    */
    black_entropy[j]=0.0;
    if (cumulative_histogram[j] > epsilon)
      {
        entropy=0.0;
        for (i=0; i <= j; i++)
          if (histogram[i] > epsilon)
            entropy-=histogram[i]/cumulative_histogram[j]*
              log(histogram[i]/cumulative_histogram[j]);
        black_entropy[j]=entropy;
      }
    /*
      White entropy.
    */
    white_entropy[j]=0.0;
    if ((1.0-cumulative_histogram[j]) > epsilon)
      {
        entropy=0.0;
        for (i=j+1; i <= MaxIntensity; i++)
          if (histogram[i] > epsilon)
            entropy-=histogram[i]/(1.0-cumulative_histogram[j])*
              log(histogram[i]/(1.0-cumulative_histogram[j]));
        white_entropy[j]=entropy;
      }
  }
  /*
    Find histogram bin with maximum entropy.
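    Kapur's criterion: the chosen bin t maximizes the sum of the Shannon
    entropies of the background and foreground partitions,
    black_entropy[t]+white_entropy[t].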
*/ maximum_entropy=black_entropy[0]+white_entropy[0]; threshold=0; for (j=1; j <= MaxIntensity; j++) if ((black_entropy[j]+white_entropy[j]) > maximum_entropy) { maximum_entropy=black_entropy[j]+white_entropy[j]; threshold=(size_t) j; } /* Free resources. */ white_entropy=(double *) RelinquishMagickMemory(white_entropy); black_entropy=(double *) RelinquishMagickMemory(black_entropy); cumulative_histogram=(double *) RelinquishMagickMemory(cumulative_histogram); return(100.0*threshold/MaxIntensity); } static double OTSUThreshold(const Image *image,const double *histogram, ExceptionInfo *exception) { double max_sigma, *myu, *omega, *probability, *sigma, threshold; register ssize_t i; /* Compute optimal threshold from maximization of inter-class variance. */ myu=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*myu)); omega=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*omega)); probability=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*probability)); sigma=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*sigma)); if ((myu == (double *) NULL) || (omega == (double *) NULL) || (probability == (double *) NULL) || (sigma == (double *) NULL)) { if (sigma != (double *) NULL) sigma=(double *) RelinquishMagickMemory(sigma); if (probability != (double *) NULL) probability=(double *) RelinquishMagickMemory(probability); if (omega != (double *) NULL) omega=(double *) RelinquishMagickMemory(omega); if (myu != (double *) NULL) myu=(double *) RelinquishMagickMemory(myu); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(-1.0); } /* Calculate probability density. */ for (i=0; i <= (ssize_t) MaxIntensity; i++) probability[i]=histogram[i]; /* Generate probability of graylevels and mean value for separation. */ omega[0]=probability[0]; myu[0]=0.0; for (i=1; i <= (ssize_t) MaxIntensity; i++) { omega[i]=omega[i-1]+probability[i]; myu[i]=myu[i-1]+i*probability[i]; } /* Sigma maximization: inter-class variance and compute optimal threshold. */ threshold=0; max_sigma=0.0; for (i=0; i < (ssize_t) MaxIntensity; i++) { sigma[i]=0.0; if ((omega[i] != 0.0) && (omega[i] != 1.0)) sigma[i]=pow(myu[MaxIntensity]*omega[i]-myu[i],2.0)/(omega[i]*(1.0- omega[i])); if (sigma[i] > max_sigma) { max_sigma=sigma[i]; threshold=(double) i; } } /* Free resources. */ myu=(double *) RelinquishMagickMemory(myu); omega=(double *) RelinquishMagickMemory(omega); probability=(double *) RelinquishMagickMemory(probability); sigma=(double *) RelinquishMagickMemory(sigma); return(100.0*threshold/MaxIntensity); } static double TriangleThreshold(const double *histogram) { double a, b, c, count, distance, inverse_ratio, max_distance, segment, x1, x2, y1, y2; register ssize_t i; ssize_t end, max, start, threshold; /* Compute optimal threshold with triangle algorithm. */ start=0; /* find start bin, first bin not zero count */ for (i=0; i <= (ssize_t) MaxIntensity; i++) if (histogram[i] > 0.0) { start=i; break; } end=0; /* find end bin, last bin not zero count */ for (i=(ssize_t) MaxIntensity; i >= 0; i--) if (histogram[i] > 0.0) { end=i; break; } max=0; /* find max bin, bin with largest count */ count=0.0; for (i=0; i <= (ssize_t) MaxIntensity; i++) if (histogram[i] > count) { max=i; count=histogram[i]; } /* Compute threshold at split point. 
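    The triangle method draws a line from the histogram peak to the far end
    of the populated range and picks the bin whose perpendicular distance to
    that line is largest; which tail is searched depends on which side of the
    peak spans more bins.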
  */
  x1=(double) max;
  y1=histogram[max];
  x2=(double) end;
  if ((max-start) >= (end-max))
    x2=(double) start;
  y2=0.0;
  a=y1-y2;
  b=x2-x1;
  c=(-1.0)*(a*x1+b*y1);
  inverse_ratio=1.0/sqrt(a*a+b*b+c*c);
  threshold=0;
  max_distance=0.0;
  if (x2 == (double) start)
    for (i=start; i < max; i++)
    {
      segment=inverse_ratio*(a*i+b*histogram[i]+c);
      distance=sqrt(segment*segment);
      if ((distance > max_distance) && (segment > 0.0))
        {
          threshold=i;
          max_distance=distance;
        }
    }
  else
    for (i=end; i > max; i--)
    {
      segment=inverse_ratio*(a*i+b*histogram[i]+c);
      distance=sqrt(segment*segment);
      if ((distance > max_distance) && (segment < 0.0))
        {
          threshold=i;
          max_distance=distance;
        }
    }
  return(100.0*threshold/MaxIntensity);
}

MagickExport MagickBooleanType AutoThresholdImage(Image *image,
  const AutoThresholdMethod method,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  char
    property[MagickPathExtent];

  double
    gamma,
    *histogram,
    sum,
    threshold;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Form histogram.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*histogram));
  if (histogram == (double *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=MagickTrue;
  (void) memset(histogram,0,(MaxIntensity+1UL)*sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double intensity = GetPixelIntensity(image,p);
      histogram[ScaleQuantumToChar(ClampToQuantum(intensity))]++;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Normalize histogram.
  */
  sum=0.0;
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    sum+=histogram[i];
  gamma=PerceptibleReciprocal(sum);
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    histogram[i]=gamma*histogram[i];
  /*
    Discover threshold from histogram.
  */
  switch (method)
  {
    case KapurThresholdMethod:
    {
      threshold=KapurThreshold(image,histogram,exception);
      break;
    }
    case OTSUThresholdMethod:
    default:
    {
      threshold=OTSUThreshold(image,histogram,exception);
      break;
    }
    case TriangleThresholdMethod:
    {
      threshold=TriangleThreshold(histogram);
      break;
    }
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  if (threshold < 0.0)
    status=MagickFalse;
  if (status == MagickFalse)
    return(MagickFalse);
  /*
    Threshold image.
  */
  (void) FormatLocaleString(property,MagickPathExtent,"%g%%",threshold);
  (void) SetImageProperty(image,"auto-threshold:threshold",property,exception);
  return(BilevelImage(image,QuantumRange*threshold/100.0,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B i l e v e l I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BilevelImage() changes the value of individual pixels based on the
%  intensity of each pixel channel.  The result is a high-contrast image.
%
%  More precisely each channel value of the image is 'thresholded' so that if
%  it is equal to or less than the given value it is set to zero, while any
%  value greater than the given value is set to its maximum, QuantumRange.
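%
%  For example (an illustrative command-line invocation, not taken from this
%  file):
%
%      magick input.png -threshold 50% output.png
%
%  bilevels the image against 50% of QuantumRange.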
%
%  This function is what is used to implement the "-threshold" operator for
%  the command line API.
%
%  If the default channel setting is given the image is thresholded using just
%  the gray 'intensity' of the image, rather than the individual channels.
%
%  The format of the BilevelImage method is:
%
%      MagickBooleanType BilevelImage(Image *image,const double threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: define the threshold value.
%
%    o exception: return any errors or warnings in this structure.
%
%  Aside: You can get the same results as this operator by using LevelImages()
%  with the 'threshold' value for both the black_point and the white_point.
%
*/

MagickExport MagickBooleanType BilevelImage(Image *image,const double threshold,
  ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  /*
    Bilevel threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        q[i]=(Quantum) (pixel <= threshold ? 0 : QuantumRange);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B l a c k T h r e s h o l d I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BlackThresholdImage() is like ThresholdImage() but forces all pixels below
%  the threshold into black while leaving all pixels at or above the threshold
%  unchanged.
%
%  The format of the BlackThresholdImage method is:
%
%      MagickBooleanType BlackThresholdImage(Image *image,
%        const char *threshold,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: define the threshold value.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType BlackThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  GetPixelInfo(image,&threshold);
  flags=ParseGeometry(thresholds,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.rho;
  threshold.blue=geometry_info.rho;
  threshold.black=geometry_info.rho;
  threshold.alpha=100.0;
  if ((flags & SigmaValue) != 0)
    threshold.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    threshold.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    threshold.alpha=geometry_info.psi;
  if (threshold.colorspace == CMYKColorspace)
    {
      if ((flags & PsiValue) != 0)
        threshold.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        threshold.alpha=geometry_info.chi;
    }
  if ((flags & PercentValue) != 0)
    {
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.black*=(MagickRealType) (QuantumRange/100.0);
      threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
    }
  /*
    Black threshold image.
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double pixel; register ssize_t i; pixel=GetPixelIntensity(image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (image->channel_mask != DefaultChannels) pixel=(double) q[i]; if (pixel < GetPixelInfoChannel(&threshold,channel)) q[i]=(Quantum) 0; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l a m p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClampImage() set each pixel whose value is below zero to zero and any the % pixel whose value is above the quantum range to the quantum range (e.g. % 65535) otherwise the pixel value remains unchanged. % % The format of the ClampImage method is: % % MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception) { #define ClampImageTag "Clamp/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) { register ssize_t i; register PixelInfo *magick_restrict q; q=image->colormap; for (i=0; i < (ssize_t) image->colors; i++) { q->red=(double) ClampPixel(q->red); q->green=(double) ClampPixel(q->green); q->blue=(double) ClampPixel(q->blue); q->alpha=(double) ClampPixel(q->alpha); q++; } return(SyncImage(image,exception)); } /* Clamp image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampPixel((MagickRealType) q[i]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ClampImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y T h r e s h o l d M a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyThresholdMap() de-allocate the given ThresholdMap % % The format of the ListThresholdMaps method is: % % ThresholdMap *DestroyThresholdMap(Threshold *map) % % A description of each parameter follows. % % o map: Pointer to the Threshold map to destroy % */ MagickExport ThresholdMap *DestroyThresholdMap(ThresholdMap *map) { assert(map != (ThresholdMap *) NULL); if (map->map_id != (char *) NULL) map->map_id=DestroyString(map->map_id); if (map->description != (char *) NULL) map->description=DestroyString(map->description); if (map->levels != (ssize_t *) NULL) map->levels=(ssize_t *) RelinquishMagickMemory(map->levels); map=(ThresholdMap *) RelinquishMagickMemory(map); return(map); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t T h r e s h o l d M a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetThresholdMap() loads and searches one or more threshold map files for the % map matching the given name or alias. % % The format of the GetThresholdMap method is: % % ThresholdMap *GetThresholdMap(const char *map_id, % ExceptionInfo *exception) % % A description of each parameter follows. % % o map_id: ID of the map to look for. % % o exception: return any errors or warnings in this structure. 
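%
%    Map names are matched first against the built-in minimal map list and
%    then against any "thresholds.xml" configure files found on the
%    configure path; typical installations define maps such as "o2x2",
%    "o3x3", "o4x4", "o8x8", "h4x4a", and "checks", though the exact set
%    depends on the installed thresholds.xml.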
% */ MagickExport ThresholdMap *GetThresholdMap(const char *map_id, ExceptionInfo *exception) { ThresholdMap *map; map=GetThresholdMapFile(MinimalThresholdMap,"built-in",map_id,exception); if (map != (ThresholdMap *) NULL) return(map); #if !defined(MAGICKCORE_ZERO_CONFIGURATION_SUPPORT) { const StringInfo *option; LinkedListInfo *options; options=GetConfigureOptions(ThresholdsFilename,exception); option=(const StringInfo *) GetNextValueInLinkedList(options); while (option != (const StringInfo *) NULL) { map=GetThresholdMapFile((const char *) GetStringInfoDatum(option), GetStringInfoPath(option),map_id,exception); if (map != (ThresholdMap *) NULL) break; option=(const StringInfo *) GetNextValueInLinkedList(options); } options=DestroyConfigureOptions(options); } #endif return(map); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t T h r e s h o l d M a p F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetThresholdMapFile() look for a given threshold map name or alias in the % given XML file data, and return the allocated the map when found. % % The format of the ListThresholdMaps method is: % % ThresholdMap *GetThresholdMap(const char *xml,const char *filename, % const char *map_id,ExceptionInfo *exception) % % A description of each parameter follows. % % o xml: The threshold map list in XML format. % % o filename: The threshold map XML filename. % % o map_id: ID of the map to look for in XML list. % % o exception: return any errors or warnings in this structure. % */ static ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename, const char *map_id,ExceptionInfo *exception) { char *p; const char *attribute, *content; double value; register ssize_t i; ThresholdMap *map; XMLTreeInfo *description, *levels, *threshold, *thresholds; (void) LogMagickEvent(ConfigureEvent,GetMagickModule(), "Loading threshold map file \"%s\" ...",filename); map=(ThresholdMap *) NULL; thresholds=NewXMLTree(xml,exception); if (thresholds == (XMLTreeInfo *) NULL) return(map); for (threshold=GetXMLTreeChild(thresholds,"threshold"); threshold != (XMLTreeInfo *) NULL; threshold=GetNextXMLTreeTag(threshold)) { attribute=GetXMLTreeAttribute(threshold,"map"); if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0)) break; attribute=GetXMLTreeAttribute(threshold,"alias"); if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0)) break; } if (threshold == (XMLTreeInfo *) NULL) { thresholds=DestroyXMLTree(thresholds); return(map); } description=GetXMLTreeChild(threshold,"description"); if (description == (XMLTreeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingElement", "<description>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); return(map); } levels=GetXMLTreeChild(threshold,"levels"); if (levels == (XMLTreeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingElement", "<levels>, map \"%s\"", map_id); thresholds=DestroyXMLTree(thresholds); return(map); } map=(ThresholdMap *) AcquireCriticalMemory(sizeof(*map)); map->map_id=(char *) NULL; map->description=(char *) NULL; map->levels=(ssize_t *) NULL; attribute=GetXMLTreeAttribute(threshold,"map"); if (attribute != (char *) NULL) map->map_id=ConstantString(attribute); content=GetXMLTreeContent(description); if (content != (char *) NULL) map->description=ConstantString(content); 
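  /*
    Parse the <levels> attributes: width and height give the tile size of
    the dither map and divisor its level quantisation; each attribute is
    validated before the level values themselves are read from the element
    content further below.
  */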
attribute=GetXMLTreeAttribute(levels,"width"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute", "<levels width>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } map->width=StringToUnsignedLong(attribute); if (map->width == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute", "<levels width>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } attribute=GetXMLTreeAttribute(levels,"height"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute", "<levels height>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } map->height=StringToUnsignedLong(attribute); if (map->height == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute", "<levels height>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } attribute=GetXMLTreeAttribute(levels,"divisor"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute", "<levels divisor>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } map->divisor=(ssize_t) StringToLong(attribute); if (map->divisor < 2) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute", "<levels divisor>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } content=GetXMLTreeContent(levels); if (content == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingContent", "<levels>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } map->levels=(ssize_t *) AcquireQuantumMemory((size_t) map->width,map->height* sizeof(*map->levels)); if (map->levels == (ssize_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap"); for (i=0; i < (ssize_t) (map->width*map->height); i++) { map->levels[i]=(ssize_t) strtol(content,&p,10); if (p == content) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidContent", "<level> too few values, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } if ((map->levels[i] < 0) || (map->levels[i] > map->divisor)) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidContent", "<level> %.20g out of range, map \"%s\"", (double) map->levels[i],map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } content=p; } value=(double) strtol(content,&p,10); (void) value; if (p != content) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidContent", "<level> too many values, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } thresholds=DestroyXMLTree(thresholds); return(map); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + L i s t T h r e s h o l d M a p F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ListThresholdMapFile() lists the threshold maps and their descriptions 
%  in the given XML file data.
%
%  The format of the ListThresholdMapFile method is:
%
%      MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
%        const char *filename,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o file: A pointer to the output FILE.
%
%    o xml: The threshold map list in XML format.
%
%    o filename: The threshold map XML filename.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
  const char *filename,ExceptionInfo *exception)
{
  const char
    *alias,
    *content,
    *map;

  XMLTreeInfo
    *description,
    *threshold,
    *thresholds;

  assert(xml != (char *) NULL);
  assert(file != (FILE *) NULL);
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  thresholds=NewXMLTree(xml,exception);
  if (thresholds == (XMLTreeInfo *) NULL)
    return(MagickFalse);
  (void) FormatLocaleFile(file,"%-16s %-12s %s\n","Map","Alias","Description");
  (void) FormatLocaleFile(file,
    "----------------------------------------------------\n");
  threshold=GetXMLTreeChild(thresholds,"threshold");
  for ( ; threshold != (XMLTreeInfo *) NULL;
          threshold=GetNextXMLTreeTag(threshold))
  {
    map=GetXMLTreeAttribute(threshold,"map");
    if (map == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingAttribute", "<map>");
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    alias=GetXMLTreeAttribute(threshold,"alias");
    description=GetXMLTreeChild(threshold,"description");
    if (description == (XMLTreeInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingElement", "<description>, map \"%s\"",map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    content=GetXMLTreeContent(description);
    if (content == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingContent", "<description>, map \"%s\"",map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    (void) FormatLocaleFile(file,"%-16s %-12s %s\n",map,alias ? alias : "",
      content);
  }
  thresholds=DestroyXMLTree(thresholds);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L i s t T h r e s h o l d M a p s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ListThresholdMaps() lists the threshold maps and their descriptions
%  as defined by "threshold.xml" to a file.
%
%  The format of the ListThresholdMaps method is:
%
%      MagickBooleanType ListThresholdMaps(FILE *file,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o file: A pointer to the output FILE.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ListThresholdMaps(FILE *file,
  ExceptionInfo *exception)
{
  const StringInfo
    *option;

  LinkedListInfo
    *options;

  MagickStatusType
    status;

  status=MagickTrue;
  if (file == (FILE *) NULL)
    file=stdout;
  options=GetConfigureOptions(ThresholdsFilename,exception);
  (void) FormatLocaleFile(file,
    "\n   Threshold Maps for Ordered Dither Operations\n");
  option=(const StringInfo *) GetNextValueInLinkedList(options);
  while (option != (const StringInfo *) NULL)
  {
    (void) FormatLocaleFile(file,"\nPath: %s\n\n",GetStringInfoPath(option));
    status&=ListThresholdMapFile(file,(const char *)
      GetStringInfoDatum(option),GetStringInfoPath(option),exception);
    option=(const StringInfo *) GetNextValueInLinkedList(options);
  }
  options=DestroyConfigureOptions(options);
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     O r d e r e d D i t h e r I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OrderedDitherImage() performs an ordered dither based on a number of
%  pre-defined dithering threshold maps, but over multiple intensity levels,
%  which can be different for different channels, according to the input
%  argument.
%
%  The format of the OrderedDitherImage method is:
%
%      MagickBooleanType OrderedDitherImage(Image *image,
%        const char *threshold_map,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold_map: A string containing the name of the threshold dither
%      map to use, followed by zero or more numbers representing the number
%      of color levels to dither between.
%
%      Any level number less than 2 is equivalent to 2, and means only
%      binary dithering will be applied to each color channel.
%
%      No numbers means a 2 level (bitmap) dither will be applied to all
%      channels, while a single number is the number of levels applied to
%      each channel in sequence.  More numbers will be applied in turn to
%      each of the color channels.
%
%      For example: "o3x3,6" will generate a 6 level posterization of the
%      image with an ordered 3x3 diffused pixel dither being applied between
%      each level, while "checker,8,8,4" will produce a 332 colormapped
%      image with only a single checkerboard hash pattern (50% grey) between
%      each color level, basically doubling the number of color levels with
%      a bare minimum of dithering.
%
%    o exception: return any errors or warnings in this structure.
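%
%      As a usage sketch (an assumption about the calling convention, not
%      part of this method's contract): the command-line option
%      "-ordered-dither o8x8,8" reaches this method with a threshold_map of
%      "o8x8,8", i.e. the "o8x8" map with 8 levels per channel.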
% */ MagickExport MagickBooleanType OrderedDitherImage(Image *image, const char *threshold_map,ExceptionInfo *exception) { #define DitherImageTag "Dither/Image" CacheView *image_view; char token[MagickPathExtent]; const char *p; double levels[CompositePixelChannel]; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; ThresholdMap *map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (threshold_map == (const char *) NULL) return(MagickTrue); p=(char *) threshold_map; while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) && (*p != '\0')) p++; threshold_map=p; while (((isspace((int) ((unsigned char) *p)) == 0) && (*p != ',')) && (*p != '\0')) { if ((p-threshold_map) >= (MagickPathExtent-1)) break; token[p-threshold_map]=(*p); p++; } token[p-threshold_map]='\0'; map=GetThresholdMap(token,exception); if (map == (ThresholdMap *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : '%s'","ordered-dither",threshold_map); return(MagickFalse); } for (i=0; i < MaxPixelChannels; i++) levels[i]=2.0; p=strchr((char *) threshold_map,','); if ((p != (char *) NULL) && (isdigit((int) ((unsigned char) *(++p))) != 0)) { GetNextToken(p,&p,MagickPathExtent,token); for (i=0; (i < MaxPixelChannels); i++) levels[i]=StringToDouble(token,(char **) NULL); for (i=0; (*p != '\0') && (i < MaxPixelChannels); i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); levels[i]=StringToDouble(token,(char **) NULL); } } for (i=0; i < MaxPixelChannels; i++) if (fabs(levels[i]) >= 1) levels[i]-=1.0; if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; ssize_t n; n=0; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { ssize_t level, threshold; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (fabs(levels[n]) < MagickEpsilon) { n++; continue; } threshold=(ssize_t) (QuantumScale*q[i]*(levels[n]*(map->divisor-1)+1)); level=threshold/(map->divisor-1); threshold-=level*(map->divisor-1); q[i]=ClampToQuantum((double) (level+(threshold >= map->levels[(x % map->width)+map->width*(y % map->height)]))* QuantumRange/levels[n]); n++; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,DitherImageTag,progress,image->rows); if (proceed == MagickFalse) 
status=MagickFalse; } } image_view=DestroyCacheView(image_view); map=DestroyThresholdMap(map); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P e r c e p t i b l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PerceptibleImage() set each pixel whose value is less than |epsilon| to % epsilon or -epsilon (whichever is closer) otherwise the pixel value remains % unchanged. % % The format of the PerceptibleImage method is: % % MagickBooleanType PerceptibleImage(Image *image,const double epsilon, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o epsilon: the epsilon threshold (e.g. 1.0e-9). % % o exception: return any errors or warnings in this structure. % */ static inline Quantum PerceptibleThreshold(const Quantum quantum, const double epsilon) { double sign; sign=(double) quantum < 0.0 ? -1.0 : 1.0; if ((sign*quantum) >= epsilon) return(quantum); return((Quantum) (sign*epsilon)); } MagickExport MagickBooleanType PerceptibleImage(Image *image, const double epsilon,ExceptionInfo *exception) { #define PerceptibleImageTag "Perceptible/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) { register ssize_t i; register PixelInfo *magick_restrict q; q=image->colormap; for (i=0; i < (ssize_t) image->colors; i++) { q->red=(double) PerceptibleThreshold(ClampToQuantum(q->red), epsilon); q->green=(double) PerceptibleThreshold(ClampToQuantum(q->green), epsilon); q->blue=(double) PerceptibleThreshold(ClampToQuantum(q->blue), epsilon); q->alpha=(double) PerceptibleThreshold(ClampToQuantum(q->alpha), epsilon); q++; } return(SyncImage(image,exception)); } /* Perceptible image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; q[i]=PerceptibleThreshold(q[i],epsilon); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,PerceptibleImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R a n d o m T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RandomThresholdImage() changes the value of individual pixels based on the % intensity of each pixel compared to a random threshold. The result is a % low-contrast, two color image. % % The format of the RandomThresholdImage method is: % % MagickBooleanType RandomThresholdImage(Image *image, % const char *thresholds,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o low,high: Specify the high and low thresholds. These values range from % 0 to QuantumRange. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType RandomThresholdImage(Image *image, const double min_threshold, const double max_threshold,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo threshold; RandomInfo **magick_restrict random_info; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); GetPixelInfo(image,&threshold); /* Random threshold image. 
*/ status=MagickTrue; progress=0; random_info=AcquireRandomInfoThreadSet(); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double threshold; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if ((double) q[i] < min_threshold) threshold=min_threshold; else if ((double) q[i] > max_threshold) threshold=max_threshold; else threshold=(double) (QuantumRange* GetPseudoRandomValue(random_info[id])); q[i]=(double) q[i] <= threshold ? 0 : QuantumRange; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); random_info=DestroyRandomInfoThreadSet(random_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R a n g e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RangeThresholdImage() applies soft and hard thresholding. % % The format of the RangeThresholdImage method is: % % MagickBooleanType RangeThresholdImage(Image *image, % const double low_black,const double low_white,const double high_white, % const double high_black,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o low_black: Define the minimum black threshold value. % % o low_white: Define the minimum white threshold value. % % o high_white: Define the maximum white threshold value. % % o high_black: Define the maximum black threshold value. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType RangeThresholdImage(Image *image, const double low_black,const double low_white,const double high_white, const double high_black,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) TransformImageColorspace(image,sRGBColorspace,exception); /* Range threshold image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double pixel; register ssize_t i; pixel=GetPixelIntensity(image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (image->channel_mask != DefaultChannels) pixel=(double) q[i]; if (pixel < low_black) q[i]=0; else if ((pixel >= low_black) && (pixel < low_white)) q[i]=ClampToQuantum(QuantumRange* PerceptibleReciprocal(low_white-low_black)*(pixel-low_black)); else if ((pixel >= low_white) && (pixel <= high_white)) q[i]=QuantumRange; else if ((pixel > high_white) && (pixel <= high_black)) q[i]=ClampToQuantum(QuantumRange*PerceptibleReciprocal( high_black-high_white)*(high_black-pixel)); else if (pixel > high_black) q[i]=0; else q[i]=0; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W h i t e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WhiteThresholdImage() is like ThresholdImage() but forces all pixels above % the threshold into white while leaving all pixels at or below the threshold % unchanged. % % The format of the WhiteThresholdImage method is: % % MagickBooleanType WhiteThresholdImage(Image *image, % const char *threshold,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold: Define the threshold value. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType WhiteThresholdImage(Image *image, const char *thresholds,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; GeometryInfo geometry_info; MagickBooleanType status; MagickOffsetType progress; PixelInfo threshold; MagickStatusType flags; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (thresholds == (const char *) NULL) return(MagickTrue); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) TransformImageColorspace(image,sRGBColorspace,exception); GetPixelInfo(image,&threshold); flags=ParseGeometry(thresholds,&geometry_info); threshold.red=geometry_info.rho; threshold.green=geometry_info.rho; threshold.blue=geometry_info.rho; threshold.black=geometry_info.rho; threshold.alpha=100.0; if ((flags & SigmaValue) != 0) threshold.green=geometry_info.sigma; if ((flags & XiValue) != 0) threshold.blue=geometry_info.xi; if ((flags & PsiValue) != 0) threshold.alpha=geometry_info.psi; if (threshold.colorspace == CMYKColorspace) { if ((flags & PsiValue) != 0) threshold.black=geometry_info.psi; if ((flags & ChiValue) != 0) threshold.alpha=geometry_info.chi; } if ((flags & PercentValue) != 0) { threshold.red*=(MagickRealType) (QuantumRange/100.0); threshold.green*=(MagickRealType) (QuantumRange/100.0); threshold.blue*=(MagickRealType) (QuantumRange/100.0); threshold.black*=(MagickRealType) (QuantumRange/100.0); threshold.alpha*=(MagickRealType) (QuantumRange/100.0); } /* White threshold image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double pixel; register ssize_t i; pixel=GetPixelIntensity(image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (image->channel_mask != DefaultChannels) pixel=(double) q[i]; if (pixel > GetPixelInfoChannel(&threshold,channel)) q[i]=QuantumRange; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); }
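
/*
  Usage sketch (not part of MagickCore): the block below is compiled only
  when THRESHOLD_USAGE_EXAMPLE is defined, a hypothetical macro added here
  purely for illustration.  It reads an image, applies a 55% white
  threshold followed by a 45% black threshold using the methods defined
  above, and writes the result; it assumes only the public MagickCore API
  (MagickCoreGenesis, ReadImage, WriteImage, and friends).
*/
#if defined(THRESHOLD_USAGE_EXAMPLE)
#include "MagickCore/MagickCore.h"

int main(int argc,char **argv)
{
  ExceptionInfo
    *exception;

  Image
    *image;

  ImageInfo
    *image_info;

  if (argc != 3)
    return(1);  /* usage: demo input-image output-image */
  MagickCoreGenesis(*argv,MagickTrue);
  exception=AcquireExceptionInfo();
  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,argv[1],MagickPathExtent);
  image=ReadImage(image_info,exception);
  if (image != (Image *) NULL)
    {
      /*
        Pixels above 55% of QuantumRange go to white, pixels below 45% go
        to black; the mid band is left unchanged.
      */
      (void) WhiteThresholdImage(image,"55%",exception);
      (void) BlackThresholdImage(image,"45%",exception);
      (void) CopyMagickString(image->filename,argv[2],MagickPathExtent);
      (void) WriteImage(image_info,image,exception);
      image=DestroyImage(image);
    }
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}
#endif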
residual_based_adjoint_bossak_scheme.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: // #if !defined(KRATOS_RESIDUAL_BASED_ADJOINT_BOSSAK_SCHEME_H_INCLUDED) #define KRATOS_RESIDUAL_BASED_ADJOINT_BOSSAK_SCHEME_H_INCLUDED // System includes #include <vector> #include <string> #include <unordered_set> #include <functional> // External includes // Project includes #include "includes/define.h" #include "includes/checks.h" #include "includes/kratos_parameters.h" #include "solving_strategies/schemes/scheme.h" #include "response_functions/adjoint_response_function.h" #include "utilities/variable_utils.h" #include "utilities/indirect_scalar.h" #include "utilities/adjoint_extensions.h" #include "utilities/parallel_utilities.h" namespace Kratos { ///@name Kratos Classes ///@{ /// A scheme for dynamic adjoint equations, using Bossak time integration. /** * It can be used for either first- or second-order time derivatives. Elements * and conditions must provide a specialization of AdjointExtensions via their * data value container, which allows the scheme to operate independently of * the variable arrangements in the element or condition. */ template <class TSparseSpace, class TDenseSpace> class ResidualBasedAdjointBossakScheme : public Scheme<TSparseSpace, TDenseSpace> { public: ///@name Type Definitions ///@{ KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedAdjointBossakScheme); typedef Scheme<TSparseSpace, TDenseSpace> BaseType; typedef typename BaseType::TSystemMatrixType SystemMatrixType; typedef typename BaseType::TSystemVectorType SystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::DofsArrayType DofsArrayType; ///@} ///@name Life Cycle ///@{ /// Constructor. ResidualBasedAdjointBossakScheme( Parameters Settings, AdjointResponseFunction::Pointer pResponseFunction ) : mpResponseFunction(pResponseFunction) { Parameters default_parameters(R"({ "name" : "adjoint_bossak", "scheme_type" : "bossak", "alpha_bossak" : -0.3 })"); Settings.ValidateAndAssignDefaults(default_parameters); mBossak.Alpha = Settings["alpha_bossak"].GetDouble(); } /// Destructor. 
~ResidualBasedAdjointBossakScheme() override { } ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ int Check(const ModelPart& rModelPart) const override { KRATOS_TRY std::vector<const VariableData*> lambda2_vars = GatherVariables( rModelPart.Elements(), [](const AdjointExtensions& rExtensions, std::vector<const VariableData*>& rVec) { rExtensions.GetFirstDerivativesVariables(rVec); }); std::vector<const VariableData*> lambda3_vars = GatherVariables( rModelPart.Elements(), [](const AdjointExtensions& rExtensions, std::vector<const VariableData*>& rVec) { return rExtensions.GetSecondDerivativesVariables(rVec); }); std::vector<const VariableData*> auxiliary_vars = GatherVariables( rModelPart.Elements(), [](const AdjointExtensions& rExtensions, std::vector<const VariableData*>& rVec) { return rExtensions.GetAuxiliaryVariables(rVec); }); KRATOS_ERROR_IF(lambda2_vars.size() != lambda3_vars.size()) << "First derivatives variable list and second derivatives " "variables list size mismatch.\n"; KRATOS_ERROR_IF(lambda2_vars.size() != auxiliary_vars.size()) << "First derivatives variable list and auxiliary variables list " "size mismatch.\n"; for (unsigned int i_var = 0; i_var < lambda2_vars.size(); ++i_var) { const auto& r_lambda2_variable_name = lambda2_vars[i_var]->Name(); const auto& r_lambda3_variable_name = lambda3_vars[i_var]->Name(); const auto& r_auxiliary_variable_name = auxiliary_vars[i_var]->Name(); if (KratosComponents<Variable<array_1d<double, 3>>>::Has(r_lambda2_variable_name)) { CheckVariables<array_1d<double, 3>>(rModelPart, r_lambda2_variable_name, r_lambda3_variable_name, r_auxiliary_variable_name); } else if (KratosComponents<Variable<double>>::Has(r_lambda2_variable_name)) { CheckVariables<double>(rModelPart, r_lambda2_variable_name, r_lambda3_variable_name, r_auxiliary_variable_name); } else { KRATOS_ERROR << "Unsupported variable type " << r_lambda2_variable_name << "."; } } return BaseType::Check(rModelPart); KRATOS_CATCH(""); } void Initialize(ModelPart& rModelPart) override { KRATOS_TRY; BaseType::Initialize(rModelPart); // Allocate auxiliary memory. 
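        // Each scratch container below holds one entry per OpenMP thread so
        // that CalculateSystemContributions() can run concurrently over
        // elements and conditions without locking; entries are indexed via
        // OpenMPUtils::ThisThread().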
int num_threads = OpenMPUtils::GetNumThreads(); mLeftHandSide.resize(num_threads); mResponseGradient.resize(num_threads); mFirstDerivsLHS.resize(num_threads); mFirstDerivsResponseGradient.resize(num_threads); mSecondDerivsLHS.resize(num_threads); mSecondDerivsResponseGradient.resize(num_threads); mAdjointValuesVector.resize(num_threads); mAdjointIndirectVector2.resize(num_threads); mAdjointIndirectVector3.resize(num_threads); mAuxAdjointIndirectVector1.resize(num_threads); VariableUtils().SetNonHistoricalVariableToZero(NUMBER_OF_NEIGHBOUR_ELEMENTS, rModelPart.Nodes()); rModelPart.GetProcessInfo()[BOSSAK_ALPHA] = mBossak.Alpha; KRATOS_CATCH(""); } void InitializeSolutionStep( ModelPart& rModelPart, SystemMatrixType& rA, SystemVectorType& rDx, SystemVectorType& rb) override { KRATOS_TRY; BaseType::InitializeSolutionStep(rModelPart, rA, rDx, rb); const auto& r_current_process_info = rModelPart.GetProcessInfo(); mBossak = CalculateBossakConstants(mBossak.Alpha, GetTimeStep(r_current_process_info)); this->CalculateNodeNeighbourCount(rModelPart); KRATOS_CATCH(""); } void FinalizeSolutionStep( ModelPart& rModelPart, SystemMatrixType& rA, SystemVectorType& rDx, SystemVectorType& rb) override { KRATOS_TRY; BaseType::FinalizeSolutionStep(rModelPart, rA, rDx, rb); this->UpdateAuxiliaryVariable(rModelPart); KRATOS_CATCH(""); } void Update( ModelPart& rModelPart, DofsArrayType& rDofSet, SystemMatrixType& rA, SystemVectorType& rDx, SystemVectorType& rb) override { KRATOS_TRY; // Update degrees of freedom: adjoint variables associated to the // residual of the physical problem. this->mpDofUpdater->UpdateDofs(rDofSet, rDx); // Update adjoint variables associated to time integration. this->UpdateTimeSchemeAdjoints(rModelPart); KRATOS_CATCH(""); } void CalculateSystemContributions( Element& rCurrentElement, LocalSystemMatrixType& rLHS_Contribution, LocalSystemVectorType& rRHS_Contribution, Element::EquationIdVectorType& rEquationId, const ProcessInfo& rCurrentProcessInfo) override { KRATOS_TRY; const auto k = OpenMPUtils::ThisThread(); rCurrentElement.GetValuesVector(mAdjointValuesVector[k]); const auto local_size = mAdjointValuesVector[k].size(); if (rRHS_Contribution.size() != local_size) { rRHS_Contribution.resize(local_size, false); } if (rLHS_Contribution.size1() != local_size || rLHS_Contribution.size2() != local_size) { rLHS_Contribution.resize(local_size, local_size, false); } this->CheckAndResizeThreadStorage(local_size); this->CalculateGradientContributions(rCurrentElement, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo); this->CalculateFirstDerivativeContributions( rCurrentElement, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo); this->CalculateSecondDerivativeContributions( rCurrentElement, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo); this->CalculatePreviousTimeStepContributions( rCurrentElement, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo); this->CalculateResidualLocalContributions( rCurrentElement, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo); rCurrentElement.EquationIdVector(rEquationId, rCurrentProcessInfo); KRATOS_CATCH(""); } void CalculateLHSContribution( Element& rCurrentElement, LocalSystemMatrixType& rLHS_Contribution, Element::EquationIdVectorType& rEquationId, const ProcessInfo& rCurrentProcessInfo) override { KRATOS_TRY; LocalSystemVectorType RHS_Contribution; CalculateSystemContributions(rCurrentElement, rLHS_Contribution, RHS_Contribution, rEquationId, rCurrentProcessInfo); KRATOS_CATCH(""); } void 
CalculateSystemContributions( Condition& rCurrentCondition, LocalSystemMatrixType& rLHS_Contribution, LocalSystemVectorType& rRHS_Contribution, Condition::EquationIdVectorType& rEquationId, const ProcessInfo& rCurrentProcessInfo) override { KRATOS_TRY; const auto k = OpenMPUtils::ThisThread(); rCurrentCondition.GetValuesVector(mAdjointValuesVector[k]); const auto local_size = mAdjointValuesVector[k].size(); if (rRHS_Contribution.size() != local_size) { rRHS_Contribution.resize(local_size, false); } if (rLHS_Contribution.size1() != local_size || rLHS_Contribution.size2() != local_size) { rLHS_Contribution.resize(local_size, local_size, false); } this->CheckAndResizeThreadStorage(local_size); this->CalculateGradientContributions(rCurrentCondition, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo); this->CalculateFirstDerivativeContributions( rCurrentCondition, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo); this->CalculateSecondDerivativeContributions( rCurrentCondition, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo); // It is not required to call CalculatePreviousTimeStepContributions here again // since, the previous time step contributions from conditions are stored in variables // mentioned in AdjointExtensions, and they are added via CalculateSystemContributions<ElementType> // method. this->CalculateResidualLocalContributions( rCurrentCondition, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo); rCurrentCondition.EquationIdVector(rEquationId, rCurrentProcessInfo); KRATOS_CATCH(""); } void CalculateLHSContribution( Condition& rCurrentCondition, LocalSystemMatrixType& rLHS_Contribution, Condition::EquationIdVectorType& rEquationId, const ProcessInfo& rCurrentProcessInfo) override { KRATOS_TRY; LocalSystemVectorType RHS_Contribution; CalculateSystemContributions(rCurrentCondition, rLHS_Contribution, RHS_Contribution, rEquationId, rCurrentProcessInfo); KRATOS_CATCH(""); } void Clear() override { this->mpDofUpdater->Clear(); } ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { return "ResidualBasedAdjointBossakScheme"; } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << Info(); } /// Print object's data. void PrintData(std::ostream& rOStream) const override { rOStream << Info(); } ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected member Variables ///@{ struct BossakConstants { double Alpha; double Beta; double Gamma; double C0; double C1; double C2; double C3; double C4; double C5; double C6; double C7; }; ///@} ///@name Protected Operations ///@{ /** * @brief Calculates elemental residual * * \[ * \underline{F} = \underline{F} - \mathbf{\underline{K}}\underline{\lambda}_1 * \] * * @param rCurrentElement Current element * @param rLHS_Contribution Left hand side matrix (i.e. $\mathbf{\underline{K}}$) * @param rRHS_Contribution Right hand side vector (i.e. 
$\underline{F}$) * @param rCurrentProcessInfo Current process info */ virtual void CalculateResidualLocalContributions( Element& rCurrentElement, LocalSystemMatrixType& rLHS_Contribution, LocalSystemVectorType& rRHS_Contribution, const ProcessInfo& rCurrentProcessInfo) { CalculateEntityResidualLocalContributions( rCurrentElement, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo); } /** * @brief Calculates condition residual * * \[ * \underline{F} = \underline{F} - \mathbf{\underline{K}}\underline{\lambda}_1 * \] * * @param rCurrentCondition Current condition * @param rLHS_Contribution Left hand side matrix (i.e. $\mathbf{\underline{K}}$) * @param rRHS_Contribution Right hand side vector (i.e. $\underline{F}$) * @param rCurrentProcessInfo Current process info */ virtual void CalculateResidualLocalContributions( Condition& rCurrentCondition, LocalSystemMatrixType& rLHS_Contribution, LocalSystemVectorType& rRHS_Contribution, const ProcessInfo& rCurrentProcessInfo) { CalculateEntityResidualLocalContributions( rCurrentCondition, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo); } /** * @brief Calculate time scheme contributions from elements * * @param rElement * @param rAdjointTimeSchemeValues2 * @param rAdjointTimeSchemeValues3 * @param rCurrentProcessInfo */ virtual void CalculateTimeSchemeContributions( Element& rElement, LocalSystemVectorType& rAdjointTimeSchemeValues2, LocalSystemVectorType& rAdjointTimeSchemeValues3, AdjointResponseFunction& rAdjointResponseFunction, const BossakConstants& rBossakConstants, const ProcessInfo& rCurrentProcessInfo) { CalculateEntityTimeSchemeContributions(rElement, rAdjointTimeSchemeValues2, rAdjointTimeSchemeValues3, rCurrentProcessInfo); } /** * @brief Calculates time scheme contributions from conditions * * @param rCondition * @param rAdjointTimeSchemeValues2 * @param rAdjointTimeSchemeValues3 * @param rCurrentProcessInfo */ virtual void CalculateTimeSchemeContributions( Condition& rCondition, LocalSystemVectorType& rAdjointTimeSchemeValues2, LocalSystemVectorType& rAdjointTimeSchemeValues3, AdjointResponseFunction& rAdjointResponseFunction, const BossakConstants& rBossakConstants, const ProcessInfo& rCurrentProcessInfo) { CalculateEntityTimeSchemeContributions(rCondition, rAdjointTimeSchemeValues2, rAdjointTimeSchemeValues3, rCurrentProcessInfo); } /** * @brief Calculates auxiliary variable contributions from elements * * @param rElement * @param rAdjointAuxiliaryValues * @param rCurrentProcessInfo */ virtual void CalculateAuxiliaryVariableContributions( Element& rElement, LocalSystemVectorType& rAdjointAuxiliaryValues, AdjointResponseFunction& rAdjointResponseFunction, const BossakConstants& rBossakConstants, const ProcessInfo& rCurrentProcessInfo) { CalculateEntityAuxiliaryVariableContributions( rElement, rAdjointAuxiliaryValues, rCurrentProcessInfo); } /** * @brief Calculates auxiliary contributions from conditions * * @param rCondition * @param rAdjointAuxiliaryValues * @param rCurrentProcessInfo */ virtual void CalculateAuxiliaryVariableContributions( Condition& rCondition, LocalSystemVectorType& rAdjointAuxiliaryValues, AdjointResponseFunction& rAdjointResponseFunction, const BossakConstants& rBossakConstants, const ProcessInfo& rCurrentProcessInfo) { CalculateEntityAuxiliaryVariableContributions( rCondition, rAdjointAuxiliaryValues, rCurrentProcessInfo); } ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ BossakConstants mBossak; typename TSparseSpace::DofUpdaterPointerType 
mpDofUpdater = TSparseSpace::CreateDofUpdater(); AdjointResponseFunction::Pointer mpResponseFunction; std::vector<LocalSystemMatrixType> mLeftHandSide; std::vector<LocalSystemVectorType> mResponseGradient; std::vector<LocalSystemMatrixType> mFirstDerivsLHS; std::vector<LocalSystemVectorType> mFirstDerivsResponseGradient; std::vector<LocalSystemMatrixType> mSecondDerivsLHS; std::vector<LocalSystemVectorType> mSecondDerivsResponseGradient; std::vector<LocalSystemVectorType> mAdjointValuesVector; std::vector<std::vector<IndirectScalar<double>>> mAdjointIndirectVector2; std::vector<std::vector<IndirectScalar<double>>> mAdjointIndirectVector3; std::vector<std::vector<IndirectScalar<double>>> mAuxAdjointIndirectVector1; ///@} ///@name Private Operations ///@{ /** * @brief Calculates entity residual * * \[ * \underline{F} = \underline{F} - \mathbf{\underline{K}}\underline{\lambda}_1 * \] * * @tparam TEntityType * @param rCurrentEntity Current Entity * @param rLHS_Contribution Left hand side matrix (i.e. $\mathbf{\underline{K}}$) * @param rRHS_Contribution Right hand side vector (i.e. $\underline{F}$) * @param rCurrentProcessInfo Current process info */ template<class TEntityType> void CalculateEntityResidualLocalContributions( TEntityType& rCurrentEntity, LocalSystemMatrixType& rLHS_Contribution, LocalSystemVectorType& rRHS_Contribution, const ProcessInfo& rCurrentProcessInfo) { int k = OpenMPUtils::ThisThread(); auto& r_residual_adjoint = mAdjointValuesVector[k]; rCurrentEntity.GetValuesVector(r_residual_adjoint); noalias(rRHS_Contribution) -= prod(rLHS_Contribution, r_residual_adjoint); } /** * @brief Calculates entity first derivative contributions for adjoint system * * \[ * \mathbf{\underline{K}} = \mathbf{\underline{K}} + \frac{\partial \underline{R}^n}{\partial \underline{w}^n} \\ * \underline{F} = \underline{F} - \frac{\partial J^n}{\partial \underline{w}^n} * \] * * @tparam TEntityType * @param rCurrentEntity Current entity * @param rLHS_Contribution Left hand side matrix (i.e. $\mathbf{\underline{K}}$) * @param rRHS_Contribution Right hand side vector (i.e. $\underline{F}$) * @param rCurrentProcessInfo Current process info */ template<class TEntityType> void CalculateGradientContributions( TEntityType& rCurrentEntity, LocalSystemMatrixType& rLHS_Contribution, LocalSystemVectorType& rRHS_Contribution, const ProcessInfo& rCurrentProcessInfo) { int k = OpenMPUtils::ThisThread(); rCurrentEntity.CalculateLeftHandSide(mLeftHandSide[k], rCurrentProcessInfo); this->mpResponseFunction->CalculateGradient( rCurrentEntity, mLeftHandSide[k], mResponseGradient[k], rCurrentProcessInfo); noalias(rLHS_Contribution) = mLeftHandSide[k]; noalias(rRHS_Contribution) = -1. * mResponseGradient[k]; } /** * @brief Calculates element first derivative contributions to adjoint system * * \[ * \mathbf{\underline{K}} = \mathbf{\underline{K}} + \frac{\gamma}{\beta \Delta t} \frac{\partial \underline{R}^n}{\partial \underline{\dot{w}}^n} \\ * \underline{F} = \underline{F} - \frac{\gamma}{\beta \Delta t} \frac{\partial J^n}{\partial \underline{\dot{w}}^n} * \] * * @tparam TEntityType * @param rCurrentEntity Current entity * @param rLHS_Contribution Left hand side matrix (i.e. $\mathbf{\underline{K}}$) * @param rRHS_Contribution Right hand side vector (i.e. 
$\underline{F}$) * @param rCurrentProcessInfo Current process info */ template<class TEntityType> void CalculateFirstDerivativeContributions( TEntityType& rCurrentEntity, LocalSystemMatrixType& rLHS_Contribution, LocalSystemVectorType& rRHS_Contribution, const ProcessInfo& rCurrentProcessInfo) { int k = OpenMPUtils::ThisThread(); rCurrentEntity.CalculateFirstDerivativesLHS(mFirstDerivsLHS[k], rCurrentProcessInfo); mpResponseFunction->CalculateFirstDerivativesGradient( rCurrentEntity, mFirstDerivsLHS[k], mFirstDerivsResponseGradient[k], rCurrentProcessInfo); noalias(rLHS_Contribution) += mBossak.C6 * mFirstDerivsLHS[k]; noalias(rRHS_Contribution) -= mBossak.C6 * mFirstDerivsResponseGradient[k]; } /** * @brief Calculates element second derivative contributions for adjoint system * * \[ * \mathbf{\underline{K}} = \mathbf{\underline{K}} + \frac{1 - \alpha}{\beta\Delta t^2}\frac{\partial \underline{R}^n}{\partial \underline{\ddot{w}}^n} \\ * \underline{F} = \underline{F} - \frac{1}{\beta\Delta t^2}\frac{\partial J^n}{\partial \underline{\ddot{w}}^n} * \] * * @tparam TEntityType * @param rCurrentEntity Current entity * @param rLHS_Contribution Left hand side matrix (i.e. $\mathbf{\underline{K}}$) * @param rRHS_Contribution Right hand side vector (i.e. $\underline{F}$) * @param rCurrentProcessInfo Current process info */ template<class TEntityType> void CalculateSecondDerivativeContributions( TEntityType& rCurrentEntity, LocalSystemMatrixType& rLHS_Contribution, LocalSystemVectorType& rRHS_Contribution, const ProcessInfo& rCurrentProcessInfo) { int k = OpenMPUtils::ThisThread(); rCurrentEntity.CalculateSecondDerivativesLHS(mSecondDerivsLHS[k], rCurrentProcessInfo); mSecondDerivsLHS[k] *= (1.0 - mBossak.Alpha); this->mpResponseFunction->CalculateSecondDerivativesGradient( rCurrentEntity, mSecondDerivsLHS[k], mSecondDerivsResponseGradient[k], rCurrentProcessInfo); noalias(rLHS_Contribution) += mBossak.C7 * mSecondDerivsLHS[k]; noalias(rRHS_Contribution) -= mBossak.C7 * mSecondDerivsResponseGradient[k]; } /** * @brief Calculates previous time step contributions from elements to adjoint system * * No need to use again conditions version of this since elements includes condition nodes as well. * Therefore, this will add automatically condition contributions as well. * * \underline{F} = * \underline{F} * - \frac{1}{\beta\Delta t^2}\left[\frac{\partial \underline{R}^{n+1}}{\underline{\ddot{w}}^n}\right]^T\underline{\lambda}_1^{n+1} * - \frac{1}{\beta\Delta t^2}\frac{\partial J^{n+1}}{\underline{\ddot{w}}^n} * + \frac{\beta - \gamma\left(\gamma + \frac{1}{2}\right)}{\beta^2\Delta t}\underline{\lambda}_2^{n+1} * - \frac{\gamma + \frac{1}{2}}{\beta^2\Delta t^2}\underline{\lambda}_3^{n+1} * * @param rCurrentElement Current element * @param rLHS_Contribution Left hand side matrix (i.e. $\mathbf{\underline{K}}$) * @param rRHS_Contribution Right hand side vector (i.e. 
$\underline{F}$) * @param rCurrentProcessInfo Current process info */ void CalculatePreviousTimeStepContributions( Element& rCurrentElement, LocalSystemMatrixType& rLHS_Contribution, LocalSystemVectorType& rRHS_Contribution, const ProcessInfo& rCurrentProcessInfo) { const auto& r_geometry = rCurrentElement.GetGeometry(); const auto k = OpenMPUtils::ThisThread(); auto& r_extensions = *rCurrentElement.GetValue(ADJOINT_EXTENSIONS); unsigned local_index = 0; for (unsigned i_node = 0; i_node < r_geometry.PointsNumber(); ++i_node) { auto& r_node = r_geometry[i_node]; r_extensions.GetFirstDerivativesVector(i_node, mAdjointIndirectVector2[k], 1); r_extensions.GetSecondDerivativesVector(i_node, mAdjointIndirectVector3[k], 1); r_extensions.GetAuxiliaryVector(i_node, mAuxAdjointIndirectVector1[k], 1); const double weight = 1.0 / r_node.GetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS); for (unsigned d = 0; d < mAdjointIndirectVector2[k].size(); ++d) { rRHS_Contribution[local_index] += weight * (mBossak.C7 * mAuxAdjointIndirectVector1[k][d] + mBossak.C4 * mAdjointIndirectVector2[k][d] + mBossak.C5 * mAdjointIndirectVector3[k][d]); ++local_index; } } } /** * @brief Calculates entity time scheme contributions as depicted. * * \[ * rAdjointTimeSchemeValues2 = * - \frac{\partial J^{n}}{\partial \underline{\dot{w}}^n} * - \left[\frac{\partial \underline{R}^{n}}{\partial \underline{\dot{w}}}\right]^T\underline{\lambda}_1^{n+1} * \] * \[ * rAdjointTimeSchemeValues3 = * - \frac{\partial J^{n}}{\partial \underline{\ddot{w}}^n} * - \left(1-\alpha\right)\left[\frac{\partial \underline{R}^{n}}{\partial \underline{\ddot{w}}^n}\right]^T\underline{\lambda}_1^{n+1} * \] * * @tparam TEntityType * @param rCurrentEntity * @param rAdjointTimeSchemeValues2 * @param rAdjointTimeSchemeValues3 * @param rProcessInfo */ template<class TEntityType> void CalculateEntityTimeSchemeContributions( TEntityType& rCurrentEntity, LocalSystemVectorType& rAdjointTimeSchemeValues2, LocalSystemVectorType& rAdjointTimeSchemeValues3, const ProcessInfo& rProcessInfo) { KRATOS_TRY const int k = OpenMPUtils::ThisThread(); rCurrentEntity.GetValuesVector(mAdjointValuesVector[k]); this->CheckAndResizeThreadStorage(mAdjointValuesVector[k].size()); /// starting to build residual for next time step calculations rCurrentEntity.CalculateFirstDerivativesLHS(mFirstDerivsLHS[k], rProcessInfo); this->mpResponseFunction->CalculateFirstDerivativesGradient( rCurrentEntity, mFirstDerivsLHS[k], mFirstDerivsResponseGradient[k], rProcessInfo); rCurrentEntity.CalculateSecondDerivativesLHS(mSecondDerivsLHS[k], rProcessInfo); mSecondDerivsLHS[k] *= (1.0 - mBossak.Alpha); this->mpResponseFunction->CalculateSecondDerivativesGradient( rCurrentEntity, mSecondDerivsLHS[k], mSecondDerivsResponseGradient[k], rProcessInfo); if (rAdjointTimeSchemeValues2.size() != mFirstDerivsResponseGradient[k].size()) rAdjointTimeSchemeValues2.resize(mFirstDerivsResponseGradient[k].size(), false); noalias(rAdjointTimeSchemeValues2) = -mFirstDerivsResponseGradient[k] - prod(mFirstDerivsLHS[k], mAdjointValuesVector[k]); if (rAdjointTimeSchemeValues3.size() != mSecondDerivsResponseGradient[k].size()) rAdjointTimeSchemeValues3.resize(mSecondDerivsResponseGradient[k].size(), false); noalias(rAdjointTimeSchemeValues3) = -mSecondDerivsResponseGradient[k] - prod(mSecondDerivsLHS[k], mAdjointValuesVector[k]); KRATOS_CATCH(""); } /** * @brief Calculates contributions from each entity for auxiliary variable as depicted * * rAdjointAuxiliaryValues = * - \frac{\partial J^{n+1}}{\partial \underline{\ddot{w}}^n} * - 
\alpha \left[\frac{\partial \underline{R}^{n+1}}{\partial \underline{\ddot{w}}^n}\right]^T\underline{\lambda}_1^{n+1} * * @tparam TEntityType * @param rCurrentEntity * @param rAdjointAuxiliaryValues * @param rProcessInfo */ template <class TEntityType> void CalculateEntityAuxiliaryVariableContributions( TEntityType& rCurrentEntity, LocalSystemVectorType& rAdjointAuxiliaryValues, const ProcessInfo& rProcessInfo) { KRATOS_TRY const int k = OpenMPUtils::ThisThread(); rCurrentEntity.GetValuesVector(mAdjointValuesVector[k]); this->CheckAndResizeThreadStorage(mAdjointValuesVector[k].size()); rCurrentEntity.CalculateSecondDerivativesLHS(mSecondDerivsLHS[k], rProcessInfo); mSecondDerivsLHS[k] *= mBossak.Alpha; this->mpResponseFunction->CalculateSecondDerivativesGradient( rCurrentEntity, mSecondDerivsLHS[k], mSecondDerivsResponseGradient[k], rProcessInfo); if (rAdjointAuxiliaryValues.size() != mSecondDerivsLHS[k].size1()) rAdjointAuxiliaryValues.resize(mSecondDerivsLHS[k].size1(), false); noalias(rAdjointAuxiliaryValues) = prod(mSecondDerivsLHS[k], mAdjointValuesVector[k]) + mSecondDerivsResponseGradient[k]; KRATOS_CATCH(""); } void CalculateNodeNeighbourCount(ModelPart& rModelPart) { // Calculate number of neighbour elements for each node. VariableUtils().SetNonHistoricalVariableToZero(NUMBER_OF_NEIGHBOUR_ELEMENTS, rModelPart.Nodes()); block_for_each(rModelPart.Elements(), [&](ModelPart::ElementType& rElement) { auto& r_geometry = rElement.GetGeometry(); for (unsigned j = 0; j < r_geometry.PointsNumber(); ++j) { double& r_num_neighbour = r_geometry[j].GetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS); #pragma omp atomic r_num_neighbour += 1.0; } }); rModelPart.GetCommunicator().AssembleNonHistoricalData(NUMBER_OF_NEIGHBOUR_ELEMENTS); } void UpdateTimeSchemeAdjoints(ModelPart& rModelPart) { KRATOS_TRY; std::vector<const VariableData*> lambda2_vars = GatherVariables( rModelPart.Elements(), [](const AdjointExtensions& rExtensions, std::vector<const VariableData*>& rVec) { rExtensions.GetFirstDerivativesVariables(rVec); }); std::vector<const VariableData*> lambda3_vars = GatherVariables( rModelPart.Elements(), [](const AdjointExtensions& rExtensions, std::vector<const VariableData*>& rVec) { return rExtensions.GetSecondDerivativesVariables(rVec); }); std::vector<const VariableData*> auxiliary_vars = GatherVariables( rModelPart.Elements(), [](const AdjointExtensions& rExtensions, std::vector<const VariableData*>& rVec) { return rExtensions.GetAuxiliaryVariables(rVec); }); SetToZero_AdjointVars(lambda2_vars, rModelPart.Nodes()); SetToZero_AdjointVars(lambda3_vars, rModelPart.Nodes()); const auto& r_process_info = rModelPart.GetProcessInfo(); UpdateEntityTimeSchemeContributions(rModelPart.Elements(), r_process_info); UpdateEntityTimeSchemeContributions(rModelPart.Conditions(), r_process_info); // Finalize global assembly Assemble_AdjointVars(lambda2_vars, rModelPart.GetCommunicator()); Assemble_AdjointVars(lambda3_vars, rModelPart.GetCommunicator()); for (unsigned int i_var = 0; i_var < lambda2_vars.size(); ++i_var) { const auto& r_lambda2_variable_name = lambda2_vars[i_var]->Name(); const auto& r_lambda3_variable_name = lambda3_vars[i_var]->Name(); const auto& r_auxiliary_variable_name = auxiliary_vars[i_var]->Name(); if (KratosComponents<Variable<array_1d<double, 3>>>::Has(r_lambda2_variable_name)) { UpdateTimeSchemeVariablesFromOldContributions<array_1d<double, 3>>( rModelPart.Nodes(), r_lambda2_variable_name, r_lambda3_variable_name, r_auxiliary_variable_name); } else if 
(KratosComponents<Variable<double>>::Has(r_lambda2_variable_name)) { UpdateTimeSchemeVariablesFromOldContributions<double>( rModelPart.Nodes(), r_lambda2_variable_name, r_lambda3_variable_name, r_auxiliary_variable_name); } else { KRATOS_ERROR << "Unsupported variable type " << r_lambda2_variable_name << "."; } } KRATOS_CATCH(""); } /** * @brief Updates time scheme variables in nodes of model part * * @tparam TEntityContainerType * @param rEntityContainer * @param rProcessInfo */ template <class TEntityContainerType> void UpdateEntityTimeSchemeContributions( TEntityContainerType& rEntityContainer, const ProcessInfo& rProcessInfo) { KRATOS_TRY const int number_of_elements = rEntityContainer.size(); Vector adjoint2_aux, adjoint3_aux; #pragma omp parallel for private(adjoint2_aux, adjoint3_aux) for (int i = 0; i < number_of_elements; ++i) { auto& r_entity = *(rEntityContainer.begin() + i); const int k = OpenMPUtils::ThisThread(); this->CalculateTimeSchemeContributions( r_entity, adjoint2_aux, adjoint3_aux, *this->mpResponseFunction, mBossak, rProcessInfo); auto& r_extensions = *r_entity.GetValue(ADJOINT_EXTENSIONS); // Assemble the contributions to the corresponding nodal unknowns. unsigned local_index = 0; auto& r_geometry = r_entity.GetGeometry(); for (unsigned i_node = 0; i_node < r_geometry.PointsNumber(); ++i_node) { r_extensions.GetFirstDerivativesVector(i_node, mAdjointIndirectVector2[k], 0); r_extensions.GetSecondDerivativesVector(i_node, mAdjointIndirectVector3[k], 0); auto& r_node = r_geometry[i_node]; r_node.SetLock(); for (unsigned d = 0; d < mAdjointIndirectVector2[k].size(); ++d) { mAdjointIndirectVector2[k][d] += adjoint2_aux[local_index]; mAdjointIndirectVector3[k][d] += adjoint3_aux[local_index]; ++local_index; } r_node.UnSetLock(); } } KRATOS_CATCH(""); } /** * @brief Update nodal variables with contributions from previous time step adjoint variables * * @tparam TDataType * @param rNodes * @param rLambda2VariableName * @param rLambda3VariableName * @param rAuxiliaryVariableName */ template<class TDataType> void UpdateTimeSchemeVariablesFromOldContributions( ModelPart::NodesContainerType& rNodes, const std::string& rLambda2VariableName, const std::string& rLambda3VariableName, const std::string& rAuxiliaryVariableName) { KRATOS_TRY const auto& r_lambda2_variable = KratosComponents<Variable<TDataType>>::Get(rLambda2VariableName); const auto& r_lambda3_variable = KratosComponents<Variable<TDataType>>::Get(rLambda3VariableName); const auto& r_auxiliary_variable = KratosComponents<Variable<TDataType>>::Get(rAuxiliaryVariableName); block_for_each(rNodes, [&](ModelPart::NodeType& rNode) { const TDataType& r_old_lambda2_value = rNode.FastGetSolutionStepValue(r_lambda2_variable, 1); const TDataType& r_old_lambda3_value = rNode.FastGetSolutionStepValue(r_lambda3_variable, 1); TDataType& r_lambda2_value = rNode.FastGetSolutionStepValue(r_lambda2_variable); r_lambda2_value += r_old_lambda2_value * mBossak.C0; r_lambda2_value += r_old_lambda3_value * mBossak.C1; TDataType& r_lambda3_value = rNode.FastGetSolutionStepValue(r_lambda3_variable); r_lambda3_value += r_old_lambda2_value * mBossak.C2; r_lambda3_value += r_old_lambda3_value * mBossak.C3; r_lambda3_value += rNode.FastGetSolutionStepValue(r_auxiliary_variable, 1); }); KRATOS_CATCH(""); } /** * @brief Update auxiliary variable to be used in next time step * * @param rModelPart */ void UpdateAuxiliaryVariable(ModelPart& rModelPart) { KRATOS_TRY; std::vector<const VariableData*> aux_vars = GatherVariables( rModelPart.Elements(), 
[](const AdjointExtensions& rExtensions, std::vector<const VariableData*>& rOut) { return rExtensions.GetAuxiliaryVariables(rOut); }); SetToZero_AdjointVars(aux_vars, rModelPart.Nodes()); const auto& r_process_info = rModelPart.GetProcessInfo(); // Loop over elements to assemble the remaining terms UpdateEntityAuxiliaryVariableContributions(rModelPart.Elements(), r_process_info); // Loop over conditions to assemble the remaining terms UpdateEntityAuxiliaryVariableContributions(rModelPart.Conditions(), r_process_info); // Finalize global assembly Assemble_AdjointVars(aux_vars, rModelPart.GetCommunicator()); KRATOS_CATCH(""); } /** * @brief Updates auxiliary variables in the model part * * @tparam TEntityContainerType * @param rEntityContainer * @param rProcessInfo */ template <class TEntityContainerType> void UpdateEntityAuxiliaryVariableContributions( TEntityContainerType& rEntityContainer, const ProcessInfo& rProcessInfo) { KRATOS_TRY const int number_of_entities = rEntityContainer.size(); Vector aux_adjoint_vector; #pragma omp parallel for private(aux_adjoint_vector) for (int i = 0; i < number_of_entities; ++i) { auto& r_entity = *(rEntityContainer.begin() + i); const int k = OpenMPUtils::ThisThread(); this->CalculateAuxiliaryVariableContributions( r_entity, aux_adjoint_vector, *this->mpResponseFunction, mBossak, rProcessInfo); auto& r_extensions = *r_entity.GetValue(ADJOINT_EXTENSIONS); // Assemble the contributions to the corresponding nodal unknowns. unsigned local_index = 0; auto& r_geometry = r_entity.GetGeometry(); for (unsigned i_node = 0; i_node < r_geometry.PointsNumber(); ++i_node) { auto& r_node = r_geometry[i_node]; r_extensions.GetAuxiliaryVector(i_node, mAuxAdjointIndirectVector1[k], 0); r_node.SetLock(); for (unsigned d = 0; d < mAuxAdjointIndirectVector1[k].size(); ++d) { mAuxAdjointIndirectVector1[k][d] -= aux_adjoint_vector[local_index]; ++local_index; } r_node.UnSetLock(); } } KRATOS_CATCH(""); } /** * @brief Check for variable types * * @tparam TDataType * @param rModelPart * @param rLambda2VariableName * @param rLambda3VariableName * @param rAuxiliaryVariableName */ template<class TDataType> void CheckVariables( const ModelPart& rModelPart, const std::string& rLambda2VariableName, const std::string& rLambda3VariableName, const std::string& rAuxiliaryVariableName) const { KRATOS_TRY KRATOS_ERROR_IF(!KratosComponents<Variable<TDataType>>::Has(rLambda2VariableName)) << "Adjoint variable " << rLambda2VariableName << " is not found in variable list with required type.\n"; KRATOS_ERROR_IF(!KratosComponents<Variable<TDataType>>::Has(rLambda3VariableName)) << "Adjoint variable " << rLambda3VariableName << " is not found in variable list with required type.\n"; KRATOS_ERROR_IF(!KratosComponents<Variable<TDataType>>::Has(rAuxiliaryVariableName)) << "Adjoint variable " << rAuxiliaryVariableName << " is not found in variable list with required type.\n"; const auto& r_lambda2_variable = KratosComponents<Variable<TDataType>>::Get(rLambda2VariableName); const auto& r_lambda3_variable = KratosComponents<Variable<TDataType>>::Get(rLambda3VariableName); const auto& r_auxiliary_variable = KratosComponents<Variable<TDataType>>::Get(rAuxiliaryVariableName); KRATOS_ERROR_IF(!rModelPart.HasNodalSolutionStepVariable(r_lambda2_variable)) << "Lambda 2 Variable " << rLambda2VariableName << " not found in nodal solution step variables list of " << rModelPart.Name() << ".\n"; KRATOS_ERROR_IF(!rModelPart.HasNodalSolutionStepVariable(r_lambda3_variable)) << "Lambda 3 Variable " << 
rLambda3VariableName << " not found in nodal solution step variables list of " << rModelPart.Name() << ".\n"; KRATOS_ERROR_IF(!rModelPart.HasNodalSolutionStepVariable(r_auxiliary_variable)) << "Auxiliary Variable " << rAuxiliaryVariableName << " not found in nodal solution step variables list of " << rModelPart.Name() << ".\n"; KRATOS_CATCH(""); } void CheckAndResizeThreadStorage(unsigned SystemSize) { const int k = OpenMPUtils::ThisThread(); if (mLeftHandSide[k].size1() != SystemSize || mLeftHandSide[k].size2() != SystemSize) { mLeftHandSide[k].resize(SystemSize, SystemSize, false); } if (mFirstDerivsLHS[k].size1() != SystemSize || mFirstDerivsLHS[k].size2() != SystemSize) { mFirstDerivsLHS[k].resize(SystemSize, SystemSize, false); } if (mSecondDerivsLHS[k].size1() != SystemSize || mSecondDerivsLHS[k].size2() != SystemSize) { mSecondDerivsLHS[k].resize(SystemSize, SystemSize, false); } if (mResponseGradient[k].size() != SystemSize) { mResponseGradient[k].resize(SystemSize, false); } if (mFirstDerivsResponseGradient[k].size() != SystemSize) { mFirstDerivsResponseGradient[k].resize(SystemSize, false); } if (mSecondDerivsResponseGradient[k].size() != SystemSize) { mSecondDerivsResponseGradient[k].resize(SystemSize, false); } } static BossakConstants CalculateBossakConstants(double Alpha, double DeltaTime) { BossakConstants bc; bc.Alpha = Alpha; bc.Beta = 0.25 * (1.0 - bc.Alpha) * (1.0 - bc.Alpha); bc.Gamma = 0.5 - bc.Alpha; bc.C0 = 1.0 - bc.Gamma / bc.Beta; bc.C1 = -1.0 / (bc.Beta * DeltaTime); bc.C2 = (1.0 - 0.5 * bc.Gamma / bc.Beta) * DeltaTime; bc.C3 = (1.0 - 0.5 / bc.Beta); bc.C4 = (bc.Beta - bc.Gamma * (bc.Gamma + 0.5)) / (DeltaTime * bc.Beta * bc.Beta); bc.C5 = -1.0 * (bc.Gamma + 0.5) / (DeltaTime * DeltaTime * bc.Beta * bc.Beta); bc.C6 = bc.Gamma / (bc.Beta * DeltaTime); bc.C7 = 1.0 / (DeltaTime * DeltaTime * bc.Beta); return bc; } static double GetTimeStep(const ProcessInfo& rCurrentProcessInfo) { const ProcessInfo& r_last_process_info = rCurrentProcessInfo.GetPreviousSolutionStepInfo(1); // Note: solution is backwards in time, but we still want a positive // time step // (it is the time step in the "forward" Bossak scheme). double time_step = r_last_process_info.GetValue(TIME) - rCurrentProcessInfo.GetValue(TIME); KRATOS_ERROR_IF(time_step <= 0.0) << "Backwards in time solution is not decreasing time from last " "step." << std::endl; return time_step; } struct Hash { std::size_t operator()(const VariableData* const& p) const { return p->Key(); } }; struct Pred { bool operator()(const VariableData* const l, const VariableData* const r) const { return *l == *r; } }; // Gathers variables needed for assembly. 
static std::vector<const VariableData*> GatherVariables( const ModelPart::ElementsContainerType& rElements, std::function<void(const AdjointExtensions&, std::vector<const VariableData*>&)> GetLocalVars) { KRATOS_TRY; const int num_threads = OpenMPUtils::GetNumThreads(); std::vector<const VariableData*> local_vars; std::vector<std::unordered_set<const VariableData*, Hash, Pred>> thread_vars(num_threads); #pragma omp parallel for private(local_vars) for (int i = 0; i < static_cast<int>(rElements.size()); ++i) { auto& r_element = *(rElements.begin() + i); GetLocalVars(*r_element.GetValue(ADJOINT_EXTENSIONS), local_vars); const int k = OpenMPUtils::ThisThread(); thread_vars[k].insert(local_vars.begin(), local_vars.end()); } std::unordered_set<const VariableData*, Hash, Pred> all_vars; for (int i = 0; i < num_threads; ++i) { all_vars.insert(thread_vars[i].begin(), thread_vars[i].end()); } return std::vector<const VariableData*>{all_vars.begin(), all_vars.end()}; KRATOS_CATCH(""); } static void SetToZero_AdjointVars(const std::vector<const VariableData*>& rVariables, ModelPart::NodesContainerType& rNodes) { KRATOS_TRY; for (auto p_variable_data : rVariables) { if (KratosComponents<Variable<array_1d<double, 3>>>::Has( p_variable_data->Name())) { const auto& r_variable = KratosComponents<Variable<array_1d<double, 3>>>::Get( p_variable_data->Name()); VariableUtils().SetHistoricalVariableToZero(r_variable, rNodes); } else if (KratosComponents<Variable<double>>::Has(p_variable_data->Name())) { const auto& r_variable = KratosComponents<Variable<double>>::Get(p_variable_data->Name()); VariableUtils().SetHistoricalVariableToZero(r_variable, rNodes); } else { KRATOS_ERROR << "Variable \"" << p_variable_data->Name() << "\" not found!\n"; } } KRATOS_CATCH(""); } static void Assemble_AdjointVars(const std::vector<const VariableData*>& rVariables, Communicator& rComm) { KRATOS_TRY; for (auto p_variable_data : rVariables) { if (KratosComponents<Variable<array_1d<double, 3>>>::Has( p_variable_data->Name())) { const auto& r_variable = KratosComponents<Variable<array_1d<double, 3>>>::Get( p_variable_data->Name()); rComm.AssembleCurrentData(r_variable); } else if (KratosComponents<Variable<double>>::Has(p_variable_data->Name())) { const auto& r_variable = KratosComponents<Variable<double>>::Get(p_variable_data->Name()); rComm.AssembleCurrentData(r_variable); } else { KRATOS_ERROR << "Variable \"" << p_variable_data->Name() << "\" not found!\n"; } } KRATOS_CATCH(""); } ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; /* Class ResidualBasedAdjointBossakScheme */ ///@} ///@name Type Definitions ///@{ ///@} } /* namespace Kratos.*/ #endif /* KRATOS_RESIDUAL_BASED_ADJOINT_BOSSAK_SCHEME_H_INCLUDED defined */
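The scheme above derives all of its time-integration factors from a single alpha via CalculateBossakConstants. As a sanity check, here is a minimal self-contained C sketch of the same arithmetic; the alpha and time-step values are illustrative only, not values prescribed by Kratos.

#include <stdio.h>

/* Mirrors CalculateBossakConstants above: beta and gamma follow from alpha,
 * and C0..C7 follow from beta, gamma and the (forward) time step. */
typedef struct {
    double Alpha, Beta, Gamma, C0, C1, C2, C3, C4, C5, C6, C7;
} bossak_t;

static bossak_t bossak_constants(double alpha, double dt)
{
    bossak_t b;
    b.Alpha = alpha;
    b.Beta  = 0.25 * (1.0 - alpha) * (1.0 - alpha);
    b.Gamma = 0.5 - alpha;
    b.C0 = 1.0 - b.Gamma / b.Beta;
    b.C1 = -1.0 / (b.Beta * dt);
    b.C2 = (1.0 - 0.5 * b.Gamma / b.Beta) * dt;
    b.C3 = 1.0 - 0.5 / b.Beta;
    b.C4 = (b.Beta - b.Gamma * (b.Gamma + 0.5)) / (dt * b.Beta * b.Beta);
    b.C5 = -(b.Gamma + 0.5) / (dt * dt * b.Beta * b.Beta);
    b.C6 = b.Gamma / (b.Beta * dt);
    b.C7 = 1.0 / (dt * dt * b.Beta);
    return b;
}

int main(void)
{
    bossak_t b = bossak_constants(-0.3, 0.1); /* illustrative alpha and dt */
    printf("beta=%g gamma=%g C6=%g C7=%g\n", b.Beta, b.Gamma, b.C6, b.C7);
    return 0;
}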
gsrb.omptask.c
//------------------------------------------------------------------------------------------------------------------------------ // Samuel Williams // SWWilliams@lbl.gov // Lawrence Berkeley National Lab //------------------------------------------------------------------------------------------------------------------------------ #if defined(GSRB_FP) #warning Overriding default GSRB implementation and using pre-computed 1.0/0.0 FP array for Red-Black to facilitate vectorization... #elif defined(GSRB_STRIDE2) #if defined(GSRB_OOP) #warning Overriding default GSRB implementation and using out-of-place and stride-2 accesses to minimize the number of flops #else #warning Overriding default GSRB implementation and using stride-2 accesses to minimize the number of flops #endif #elif defined(GSRB_BRANCH) #if defined(GSRB_OOP) #warning Overriding default GSRB implementation and using out-of-place implementation with an if-then-else on loop indices... #else #warning Overriding default GSRB implementation and using if-then-else on loop indices... #endif #else #define GSRB_STRIDE2 // default implementation #endif //------------------------------------------------------------------------------------------------------------------------------ #define MAX(a, b) (((a) > (b)) ? (a) : (b)) #define MIN(a, b) (((a) < (b)) ? (a) : (b)) //------------------------------------------------------------------------------------------------------------------------------ void smooth(level_type * level, int x_id, int rhs_id, double a, double b){ int s; for(s=0;s<2*NUM_SMOOTHS;s++){ // there are two sweeps per GSRB smooth // exchange the ghost zone... #ifdef GSRB_OOP // out-of-place GSRB ping pongs between x and VECTOR_TEMP if((s&1)==0){exchange_boundary(level, x_id,stencil_get_shape());apply_BCs(level, x_id,stencil_get_shape());} else{exchange_boundary(level,VECTOR_TEMP,stencil_get_shape());apply_BCs(level,VECTOR_TEMP,stencil_get_shape());} #else // in-place GSRB only operates on x exchange_boundary(level, x_id,stencil_get_shape());apply_BCs(level, x_id,stencil_get_shape()); #endif // apply the smoother... double _timeStart = getTime(); #pragma omp parallel if( (level->num_my_boxes>1) || (level->box_dim > BLOCKCOPY_TILE_J) || (level->box_dim > BLOCKCOPY_TILE_K) ) { int box; #pragma omp for private(box) nowait schedule(static,1) // omp for to maximize task injection for(box=0;box<level->num_my_boxes;box++){ // loop over all boxes this process owns... 
int kk,jj; for(kk=0;kk<level->box_dim;kk+=BLOCKCOPY_TILE_K){ for(jj=0;jj<level->box_dim;jj+=BLOCKCOPY_TILE_J){ #pragma omp task firstprivate(kk,jj,box) if( (level->box_dim > BLOCKCOPY_TILE_J) || (level->box_dim > BLOCKCOPY_TILE_K) ) { // either define these variables here, or make them all firstprivate const double h2inv = 1.0/(level->h*level->h); const int ghosts = level->box_ghosts; const int jStride = level->box_jStride; const int kStride = level->box_kStride; const int color000 = (level->my_boxes[box].low.i^level->my_boxes[box].low.j^level->my_boxes[box].low.k^s)&1; // is element 000 red or black on *THIS* sweep const double * __restrict__ rhs = level->my_boxes[box].vectors[ rhs_id] + ghosts*(1+jStride+kStride); #ifdef VECTOR_ALPHA const double * __restrict__ alpha = level->my_boxes[box].vectors[VECTOR_ALPHA ] + ghosts*(1+jStride+kStride); #endif const double * __restrict__ beta_i = level->my_boxes[box].vectors[VECTOR_BETA_I] + ghosts*(1+jStride+kStride); const double * __restrict__ beta_j = level->my_boxes[box].vectors[VECTOR_BETA_J] + ghosts*(1+jStride+kStride); const double * __restrict__ beta_k = level->my_boxes[box].vectors[VECTOR_BETA_K] + ghosts*(1+jStride+kStride); const double * __restrict__ Dinv = level->my_boxes[box].vectors[VECTOR_DINV ] + ghosts*(1+jStride+kStride); #ifdef GSRB_OOP const double * __restrict__ x_n; double * __restrict__ x_np1; if((s&1)==0){x_n = level->my_boxes[box].vectors[ x_id] + ghosts*(1+jStride+kStride); x_np1 = level->my_boxes[box].vectors[VECTOR_TEMP ] + ghosts*(1+jStride+kStride);} else{x_n = level->my_boxes[box].vectors[VECTOR_TEMP ] + ghosts*(1+jStride+kStride); x_np1 = level->my_boxes[box].vectors[ x_id] + ghosts*(1+jStride+kStride);} #else const double * __restrict__ x_n = level->my_boxes[box].vectors[ x_id] + ghosts*(1+jStride+kStride); // i.e. [0] = first non ghost zone point double * __restrict__ x_np1 = level->my_boxes[box].vectors[ x_id] + ghosts*(1+jStride+kStride); // i.e. [0] = first non ghost zone point #endif // - - - - const int khi = MIN(kk+BLOCKCOPY_TILE_K,level->box_dim); const int jhi = MIN(jj+BLOCKCOPY_TILE_J,level->box_dim); int i,j,k; #if defined(GSRB_FP) for(k=kk;k<khi;k++){ for(j=jj;j<jhi;j++){ const double * __restrict__ RedBlack = level->RedBlack_FP + ghosts*(1+jStride) + kStride*((k^color000)&0x1); for(i=0;i<level->box_dim;i++){ int ij = i + j*jStride; int ijk = i + j*jStride + k*kStride; double Ax = apply_op_ijk(x_n); x_np1[ijk] = x_n[ijk] + RedBlack[ij]*Dinv[ijk]*(rhs[ijk]-Ax); //x_np1[ijk] = ((i^j^k^color000)&1) ? x_n[ijk] : x_n[ijk] + Dinv[ijk]*(rhs[ijk]-Ax); }}} // i,j,k #elif defined(GSRB_STRIDE2) for(k=kk;k<khi;k++){ for(j=jj;j<jhi;j++){ #ifdef GSRB_OOP // out-of-place must copy old value... 
for(i=0;i<level->box_dim;i++){ int ijk = i + j*jStride + k*kStride; x_np1[ijk] = x_n[ijk]; } // i copy #endif for(i=((j^k^color000)&1);i<level->box_dim;i+=2){ // stride-2 GSRB int ijk = i + j*jStride + k*kStride; double Ax = apply_op_ijk(x_n); x_np1[ijk] = x_n[ijk] + Dinv[ijk]*(rhs[ijk]-Ax); } // i stencil }} // j,k #elif defined(GSRB_BRANCH) for(k=kk;k<khi;k++){ for(j=jj;j<jhi;j++){ for(i=0;i<level->box_dim;i++){ int ijk = i + j*jStride + k*kStride; if((i^j^k^color000^1)&1){ // looks very clean when [0] is i,j,k=0,0,0 double Ax = apply_op_ijk(x_n); x_np1[ijk] = x_n[ijk] + Dinv[ijk]*(rhs[ijk]-Ax); #ifdef GSRB_OOP }else{ x_np1[ijk] = x_n[ijk]; // copy old value when sweep color != cell color #endif } }}} // i,j,k #else #error no GSRB implementation was specified #endif }}} // JJ,KK,task } // boxes } // parallel level->timers.smooth += (double)(getTime()-_timeStart); } // s-loop } //------------------------------------------------------------------------------------------------------------------------------
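The GSRB_STRIDE2 and GSRB_BRANCH variants above must touch exactly the same red/black cells. This standalone C check (independent of the level/box data structures; the box size is illustrative) confirms that the stride-2 start index ((j^k^color000)&1) selects precisely the cells where the branch predicate ((i^j^k^color000^1)&1) is true:

#include <assert.h>
#include <stdio.h>

int main(void)
{
    const int dim = 8; /* illustrative box size */
    for (int color000 = 0; color000 < 2; ++color000)
    for (int k = 0; k < dim; ++k)
    for (int j = 0; j < dim; ++j) {
        int hit[8] = {0};
        /* cells visited by the stride-2 sweep */
        for (int i = ((j ^ k ^ color000) & 1); i < dim; i += 2) hit[i] = 1;
        /* must agree with the GSRB_BRANCH predicate */
        for (int i = 0; i < dim; ++i)
            assert(hit[i] == ((i ^ j ^ k ^ color000 ^ 1) & 1));
    }
    printf("stride-2 start index matches the red/black branch predicate\n");
    return 0;
}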
DRB004-antidep2-var-yes.c
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.

This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
  notice, this list of conditions and the disclaimer below.

* Redistributions in binary form must reproduce the above copyright
  notice, this list of conditions and the disclaimer (as noted below)
  in the documentation and/or other materials provided with the
  distribution.

* Neither the name of the LLNS/LLNL nor the names of its contributors
  may be used to endorse or promote products derived from this
  software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE
LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

/*
Two nested loops with loop-carried anti-dependence on the outer level.
This is a variable-length array version in C99. The race only exists when
the outer loop is the one parallelized, so the pragma belongs on the i loop.
Data race pair: a[i][j]@70:7 vs. a[i+1][j]@70:18
*/
#include <stdlib.h>
#include <stdio.h>

int main(int argc, char *argv[])
{
  int i, j;
  int len = 20;

  if (argc > 1)
    len = atoi(argv[1]);

  double a[len][len];

  for (i = 0; i < len; i++)
    for (j = 0; j < len; j++)
      a[i][j] = 0.5;

#pragma omp parallel for private(j)
  for (i = 0; i < len - 1; i += 1) {
    for (j = 0; j < len; j += 1) {
      a[i][j] += a[i + 1][j];
    }
  }

  for (i = 0; i < len; i++)
    for (j = 0; j < len; j++)
      printf("%lf\n", a[i][j]);

  return 0;
}
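For contrast, the standard way to break this anti-dependence is to read from an unmodified snapshot of the array, after which the outer loop is safe to parallelize. This preserves the serial semantics, since the serial loop also reads row i+1 before updating it. A sketch of the race-free rewrite (not part of DataRaceBench):

#include <stdlib.h>
#include <stdio.h>
#include <string.h>

int main(int argc, char *argv[])
{
  int i, j;
  int len = 20;

  if (argc > 1)
    len = atoi(argv[1]);

  double a[len][len];
  double b[len][len]; /* read-only snapshot removes the anti-dependence */

  for (i = 0; i < len; i++)
    for (j = 0; j < len; j++)
      a[i][j] = 0.5;

  memcpy(b, a, sizeof(a)); /* all reads now target the snapshot */
#pragma omp parallel for private(j)
  for (i = 0; i < len - 1; i++)
    for (j = 0; j < len; j++)
      a[i][j] += b[i + 1][j]; /* no thread reads what another writes */

  printf("a[0][0]=%f\n", a[0][0]);
  return 0;
}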
pyfr_driver_asp_reg.c
/******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved.                      *
* This file is part of the LIBXSMM library.                                   *
*                                                                             *
* For information on the license, see the LICENSE file.                       *
* Further information: https://github.com/hfp/libxsmm/                        *
* SPDX-License-Identifier: BSD-3-Clause                                       *
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
#include <libxsmm.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <stdio.h>
#include <math.h>

#if defined(__MKL) || defined(MKL_DIRECT_CALL_SEQ) || defined(MKL_DIRECT_CALL)
# include <mkl.h>
#else
/* prototype for the Fortran GEMM symbol; map the unmangled name onto it
   so the calls below also link without MKL */
void dgemm_(const char*, const char*, const int*, const int*, const int*,
            const double*, const double*, const int*, const double*, const int*,
            const double*, double*, const int*);
# define dgemm dgemm_
#endif

#define REPS 100
#define REALTYPE double

int my_csr_reader( const char*    i_csr_file_in,
                   unsigned int** o_row_idx,
                   unsigned int** o_column_idx,
                   REALTYPE**     o_values,
                   unsigned int*  o_row_count,
                   unsigned int*  o_column_count,
                   unsigned int*  o_element_count ) {
  FILE *l_csr_file_handle;
  const unsigned int l_line_length = 512;
  char l_line[512/*l_line_length*/+1];
  unsigned int l_header_read = 0;
  unsigned int* l_row_idx_id = NULL;
  unsigned int l_i = 0;

  l_csr_file_handle = fopen( i_csr_file_in, "r" );
  if ( l_csr_file_handle == NULL ) {
    fprintf( stderr, "cannot open CSR file!\n" );
    return -1;
  }

  while (fgets(l_line, l_line_length, l_csr_file_handle) != NULL) {
    if ( strlen(l_line) == l_line_length ) {
      fprintf( stderr, "could not read file length!\n" );
      return -1;
    }
    /* check if we are still reading comments header */
    if ( l_line[0] == '%' ) {
      continue;
    } else {
      /* if we are the first line after comment header, we allocate our data structures */
      if ( l_header_read == 0 ) {
        if (3 == sscanf(l_line, "%u %u %u", o_row_count, o_column_count, o_element_count) &&
            0 != *o_row_count && 0 != *o_column_count && 0 != *o_element_count) {
          /* allocate CSC datastructure matching mtx file */
          *o_column_idx = (unsigned int*) malloc(sizeof(unsigned int) * ((size_t)*o_element_count));
          *o_row_idx = (unsigned int*) malloc(sizeof(unsigned int) * ((size_t)*o_row_count + 1));
          *o_values = (REALTYPE*) malloc(sizeof(double) * ((size_t)*o_element_count));
          l_row_idx_id = (unsigned int*) malloc(sizeof(unsigned int) * ((size_t)*o_row_count));

          /* check if mallocs were successful */
          if ( ( *o_row_idx == NULL )    ||
               ( *o_column_idx == NULL ) ||
               ( *o_values == NULL )     ||
               ( l_row_idx_id == NULL ) ) {
            fprintf( stderr, "could not allocate sp data!\n" );
            return -1;
          }

          /* set everything to zero for init */
          memset(*o_row_idx, 0, sizeof(unsigned int)*((size_t)*o_row_count + 1));
          memset(*o_column_idx, 0, sizeof(unsigned int)*((size_t)*o_element_count));
          memset(*o_values, 0, sizeof(double)*((size_t)*o_element_count));
          memset(l_row_idx_id, 0, sizeof(unsigned int)*((size_t)*o_row_count));

          /* init column idx */
          for ( l_i = 0; l_i < (*o_row_count + 1); l_i++)
            (*o_row_idx)[l_i] = (*o_element_count);

          /* init */
          (*o_row_idx)[0] = 0;
          l_i = 0;
          l_header_read = 1;
        } else {
          fprintf( stderr, "could not parse csr description!\n" );
          return -1;
        }
      /* now we read the actual content */
      } else {
        unsigned int l_row, l_column;
        REALTYPE l_value;

        /* read a line of content */
        if ( sscanf(l_line, "%u %u %lf", &l_row, &l_column, &l_value) != 3 ) {
          fprintf( stderr, "could not read element!\n" );
          return -1;
        }
        /* adjust numbers to zero termination */
l_row--; l_column--; /* add these values to row and value structure */ (*o_column_idx)[l_i] = l_column; (*o_values)[l_i] = l_value; l_i++; /* handle columns, set id to own for this column, yeah we need to handle empty columns */ l_row_idx_id[l_row] = 1; (*o_row_idx)[l_row+1] = l_i; } } } /* close mtx file */ fclose( l_csr_file_handle ); /* check if we read a file which was consistent */ if ( l_i != (*o_element_count) ) { fprintf( stderr, "we were not able to read all elements!\n" ); return -1; } /* let's handle empty rows */ for ( l_i = 0; l_i < (*o_row_count); l_i++) { assert(NULL != l_row_idx_id); if ( l_row_idx_id[l_i] == 0 ) { (*o_row_idx)[l_i+1] = (*o_row_idx)[l_i]; } } /* free helper data structure */ if ( l_row_idx_id != NULL ) { free( l_row_idx_id ); } return 0; } int main(int argc, char* argv[]) { char* l_csr_file; REALTYPE* l_a_sp; unsigned int* l_rowptr; unsigned int* l_colidx; unsigned int l_rowcount, l_colcount, l_elements; REALTYPE* l_a_dense; REALTYPE* l_b; REALTYPE* l_c_betaone; REALTYPE* l_c_betazero; REALTYPE* l_c_gold_betaone; REALTYPE* l_c_gold_betazero; REALTYPE* l_c_dense_betaone; REALTYPE* l_c_dense_betazero; REALTYPE l_max_error = 0.0; int l_m; int l_n; int l_k; int l_i; int l_j; int l_z; int l_elems; int l_reps; int l_n_block; libxsmm_timer_tickint l_start, l_end; double l_total; double alpha = 1.0; double beta = 1.0; char trans = 'N'; libxsmm_dfsspmdm* gemm_op_betazero = NULL; libxsmm_dfsspmdm* gemm_op_betaone = NULL; if (argc != 4) { fprintf( stderr, "need csr-filename N reps!\n" ); exit(-1); } /* read sparse A */ l_csr_file = argv[1]; l_n = atoi(argv[2]); l_reps = atoi(argv[3]); if (my_csr_reader( l_csr_file, &l_rowptr, &l_colidx, &l_a_sp, &l_rowcount, &l_colcount, &l_elements ) != 0 ) { exit(-1); } l_m = l_rowcount; l_k = l_colcount; printf("CSR matrix data structure we just read:\n"); printf("rows: %u, columns: %u, elements: %u\n", l_rowcount, l_colcount, l_elements); /* allocate dense matrices */ l_a_dense = (REALTYPE*)libxsmm_aligned_malloc(sizeof(REALTYPE) * l_k * l_m, 64); l_b = (REALTYPE*)libxsmm_aligned_malloc(sizeof(REALTYPE) * l_k * l_n, 64); l_c_betazero = (REALTYPE*)libxsmm_aligned_malloc(sizeof(REALTYPE) * l_m * l_n, 64); l_c_betaone = (REALTYPE*)libxsmm_aligned_malloc(sizeof(REALTYPE) * l_m * l_n, 64); l_c_gold_betazero = (REALTYPE*)libxsmm_aligned_malloc(sizeof(REALTYPE) * l_m * l_n, 64); l_c_gold_betaone = (REALTYPE*)libxsmm_aligned_malloc(sizeof(REALTYPE) * l_m * l_n, 64); l_c_dense_betazero = (REALTYPE*)libxsmm_aligned_malloc(sizeof(REALTYPE) * l_m * l_n, 64); l_c_dense_betaone = (REALTYPE*)libxsmm_aligned_malloc(sizeof(REALTYPE) * l_m * l_n, 64); /* touch B */ for ( l_i = 0; l_i < l_k*l_n; l_i++) { l_b[l_i] = (REALTYPE)libxsmm_rng_f64(); } /* touch dense A */ for ( l_i = 0; l_i < l_k*l_m; l_i++) { l_a_dense[l_i] = (REALTYPE)0.0; } /* init dense A using sparse A */ for ( l_i = 0; l_i < l_m; l_i++ ) { l_elems = l_rowptr[l_i+1] - l_rowptr[l_i]; for ( l_z = 0; l_z < l_elems; l_z++ ) { l_a_dense[(l_i*l_k)+l_colidx[l_rowptr[l_i]+l_z]] = l_a_sp[l_rowptr[l_i]+l_z]; } } /* touch C */ for ( l_i = 0; l_i < l_m*l_n; l_i++) { l_c_gold_betaone[l_i] = (REALTYPE)libxsmm_rng_f64(); } for ( l_i = 0; l_i < l_m*l_n; l_i++) { l_c_betaone[l_i] = l_c_gold_betaone[l_i]; } for ( l_i = 0; l_i < l_m*l_n; l_i++) { l_c_dense_betaone[l_i] = l_c_gold_betaone[l_i]; } for ( l_i = 0; l_i < l_m*l_n; l_i++) { l_c_betazero[l_i] = l_c_betaone[l_i]; } for ( l_i = 0; l_i < l_m*l_n; l_i++) { l_c_gold_betazero[l_i] = l_c_gold_betaone[l_i]; } for ( l_i = 0; l_i < l_m*l_n; l_i++) { 
l_c_dense_betazero[l_i] = l_c_dense_betaone[l_i]; } /* setting up fsspmdm */ l_n_block = 48; beta = 0.0; gemm_op_betazero = libxsmm_dfsspmdm_create( l_m, l_n_block, l_k, l_k, l_n, l_n, 1.0, beta, 1, l_a_dense ); beta = 1.0; gemm_op_betaone = libxsmm_dfsspmdm_create( l_m, l_n_block, l_k, l_k, l_n, l_n, 1.0, beta, 0, l_a_dense ); /* compute golden results */ printf("computing golden solution...\n"); for ( l_j = 0; l_j < l_n; l_j++ ) { for (l_i = 0; l_i < l_m; l_i++ ) { l_elems = l_rowptr[l_i+1] - l_rowptr[l_i]; l_c_gold_betazero[(l_n*l_i) + l_j] = 0.0; for (l_z = 0; l_z < l_elems; l_z++) { l_c_gold_betazero[(l_n*l_i) + l_j] += l_a_sp[l_rowptr[l_i]+l_z] * l_b[(l_n*l_colidx[l_rowptr[l_i]+l_z])+l_j]; } } } for ( l_j = 0; l_j < l_n; l_j++ ) { for (l_i = 0; l_i < l_m; l_i++ ) { l_elems = l_rowptr[l_i+1] - l_rowptr[l_i]; for (l_z = 0; l_z < l_elems; l_z++) { l_c_gold_betaone[(l_n*l_i) + l_j] += l_a_sp[l_rowptr[l_i]+l_z] * l_b[(l_n*l_colidx[l_rowptr[l_i]+l_z])+l_j]; } } } printf("...done!\n"); /* libxsmm generated code */ printf("computing libxsmm (A sparse) solution...\n"); #ifdef _OPENMP #pragma omp parallel for private(l_z) #endif for (l_z = 0; l_z < l_n; l_z+=l_n_block) { libxsmm_dfsspmdm_execute( gemm_op_betazero, l_b+l_z, l_c_betazero+l_z ); } #ifdef _OPENMP #pragma omp parallel for private(l_z) #endif for (l_z = 0; l_z < l_n; l_z+=l_n_block) { libxsmm_dfsspmdm_execute( gemm_op_betaone, l_b+l_z, l_c_betaone+l_z ); } printf("...done!\n"); /* BLAS code */ printf("computing BLAS (A dense) solution...\n"); beta = 0.0; dgemm(&trans, &trans, &l_n, &l_m, &l_k, &alpha, l_b, &l_n, l_a_dense, &l_k, &beta, l_c_dense_betazero, &l_n ); beta = 1.0; dgemm(&trans, &trans, &l_n, &l_m, &l_k, &alpha, l_b, &l_n, l_a_dense, &l_k, &beta, l_c_dense_betaone, &l_n ); printf("...done!\n"); /* check for errors */ l_max_error = (REALTYPE)0.0; for ( l_i = 0; l_i < l_m*l_n; l_i++) { if (fabs(l_c_betazero[l_i]-l_c_gold_betazero[l_i]) > l_max_error ) { l_max_error = fabs(l_c_betazero[l_i]-l_c_gold_betazero[l_i]); } } printf("max error beta=0 (libxmm vs. gold): %f\n", l_max_error); l_max_error = (REALTYPE)0.0; for ( l_i = 0; l_i < l_m*l_n; l_i++) { if (fabs(l_c_betaone[l_i]-l_c_gold_betaone[l_i]) > l_max_error ) { l_max_error = fabs(l_c_betaone[l_i]-l_c_gold_betaone[l_i]); } } printf("max error beta=1 (libxmm vs. gold): %f\n", l_max_error); l_max_error = (REALTYPE)0.0; for ( l_i = 0; l_i < l_m*l_n; l_i++) { if (fabs(l_c_dense_betazero[l_i]-l_c_gold_betazero[l_i]) > l_max_error ) { l_max_error = fabs(l_c_dense_betazero[l_i]-l_c_gold_betazero[l_i]); } } printf("max error beta=0 (dense vs. gold): %f\n", l_max_error); l_max_error = (REALTYPE)0.0; for ( l_i = 0; l_i < l_m*l_n; l_i++) { if (fabs(l_c_dense_betaone[l_i]-l_c_gold_betaone[l_i]) > l_max_error ) { l_max_error = fabs(l_c_dense_betaone[l_i]-l_c_gold_betaone[l_i]); } } printf("max error beta=1 (dense vs. 
gold): %f\n", l_max_error); /* Let's measure performance */ l_start = libxsmm_timer_tick(); for ( l_j = 0; l_j < l_reps; l_j++ ) { #ifdef _OPENMP #pragma omp parallel for private(l_z) #endif for (l_z = 0; l_z < l_n; l_z+=l_n_block) { libxsmm_dfsspmdm_execute( gemm_op_betazero, l_b+l_z, l_c_betazero+l_z ); } } l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); fprintf(stdout, "time[s] LIBXSMM (RM, M=%i, N=%i, K=%i, beta=0): %f\n", l_m, l_n, l_k, l_total/(double)l_reps ); fprintf(stdout, "GFLOPS LIBXSMM (RM, M=%i, N=%i, K=%i, beta=0): %f (sparse)\n", l_m, l_n, l_k, (2.0 * (double)l_elements * (double)l_n * (double)l_reps * 1.0e-9) / l_total ); fprintf(stdout, "GFLOPS LIBXSMM (RM, M=%i, N=%i, K=%i, beta=0): %f (dense)\n", l_m, l_n, l_k, (2.0 * (double)l_m * (double)l_n * (double)l_k * (double)l_reps * 1.0e-9) / l_total ); fprintf(stdout, "GB/s LIBXSMM (RM, M=%i, N=%i, K=%i, beta=0): %f\n", l_m, l_n, l_k, ((double)sizeof(double) * (((double)l_m * (double)l_n) + ((double)l_k * (double)l_n)) * (double)l_reps * 1.0e-9) / l_total ); l_start = libxsmm_timer_tick(); for ( l_j = 0; l_j < l_reps; l_j++ ) { #ifdef _OPENMP #pragma omp parallel for private(l_z) #endif for (l_z = 0; l_z < l_n; l_z+=l_n_block) { libxsmm_dfsspmdm_execute( gemm_op_betaone, l_b+l_z, l_c_betaone+l_z ); } } l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); fprintf(stdout, "time[s] LIBXSMM (RM, M=%i, N=%i, K=%i, beta=1): %f\n", l_m, l_n, l_k, l_total/(double)l_reps ); fprintf(stdout, "GFLOPS LIBXSMM (RM, M=%i, N=%i, K=%i, beta=1): %f (sparse)\n", l_m, l_n, l_k, (2.0 * (double)l_elements * (double)l_n * (double)l_reps * 1.0e-9) / l_total ); fprintf(stdout, "GFLOPS LIBXSMM (RM, M=%i, N=%i, K=%i, beta=1): %f (dense)\n", l_m, l_n, l_k, (2.0 * (double)l_m * (double)l_n * (double)l_k * (double)l_reps * 1.0e-9) / l_total ); fprintf(stdout, "GB/s LIBXSMM (RM, M=%i, N=%i, K=%i, beta=1): %f\n", l_m, l_n, l_k, ((double)sizeof(double) * ((2.0*(double)l_m * (double)l_n) + ((double)l_k * (double)l_n)) * (double)l_reps * 1.0e-9) / l_total ); l_start = libxsmm_timer_tick(); beta = 0.0; for ( l_j = 0; l_j < l_reps; l_j++ ) { dgemm(&trans, &trans, &l_n, &l_m, &l_k, &alpha, l_b, &l_n, l_a_dense, &l_k, &beta, l_c_dense_betazero, &l_n ); } l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); fprintf(stdout, "time[s] MKL (RM, M=%i, N=%i, K=%i, beta=0): %f\n", l_m, l_n, l_k, l_total/(double)l_reps ); fprintf(stdout, "GFLOPS MKL (RM, M=%i, N=%i, K=%i, beta=0): %f\n", l_m, l_n, l_k, (2.0 * (double)l_m * (double)l_n * (double)l_k * (double)l_reps * 1.0e-9) / l_total ); fprintf(stdout, "GB/s MKL (RM, M=%i, N=%i, K=%i, beta=0): %f\n", l_m, l_n, l_k, ((double)sizeof(double) * ((2.0*(double)l_m * (double)l_n) + ((double)l_k * (double)l_n)) * (double)l_reps * 1.0e-9) / l_total ); l_start = libxsmm_timer_tick(); beta = 1.0; for ( l_j = 0; l_j < l_reps; l_j++ ) { dgemm(&trans, &trans, &l_n, &l_m, &l_k, &alpha, l_b, &l_n, l_a_dense, &l_k, &beta, l_c_dense_betaone, &l_n ); } l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); fprintf(stdout, "time[s] MKL (RM, M=%i, N=%i, K=%i, beta=1): %f\n", l_m, l_n, l_k, l_total/(double)l_reps ); fprintf(stdout, "GFLOPS MKL (RM, M=%i, N=%i, K=%i, beta=1): %f\n", l_m, l_n, l_k, (2.0 * (double)l_m * (double)l_n * (double)l_k * (double)l_reps * 1.0e-9) / l_total ); fprintf(stdout, "GB/s MKL (RM, M=%i, N=%i, K=%i, beta=1): %f\n", l_m, l_n, l_k, ((double)sizeof(double) * ((2.0*(double)l_m * (double)l_n) + ((double)l_k * 
(double)l_n)) * (double)l_reps * 1.0e-9) / l_total );

  /* free handles and buffers */
  libxsmm_dfsspmdm_destroy( gemm_op_betazero );
  libxsmm_dfsspmdm_destroy( gemm_op_betaone );
  libxsmm_free( l_a_dense );
  libxsmm_free( l_b );
  libxsmm_free( l_c_betazero );
  libxsmm_free( l_c_betaone );
  libxsmm_free( l_c_gold_betazero );
  libxsmm_free( l_c_gold_betaone );
  libxsmm_free( l_c_dense_betazero );
  libxsmm_free( l_c_dense_betaone );
  free( l_a_sp );
  free( l_rowptr );
  free( l_colidx );

  return EXIT_SUCCESS;
}
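The golden-solution loops above compute C = A_sparse * B one column at a time; written row-by-row, the same CSR-times-dense product makes the access pattern clearer. A standalone sketch (illustrative names, not part of the LIBXSMM API) that could serve as another reference implementation:

#include <stdio.h>
#include <stddef.h>

/* C (m x n, row-major) = A (CSR) * B (k x n, row-major); one row of A per iteration */
static void csr_spmm(int m, int n,
                     const unsigned int *rowptr, const unsigned int *colidx,
                     const double *val, const double *B, double *C)
{
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (int i = 0; i < m; ++i) {
    for (int j = 0; j < n; ++j) C[(size_t)i*n + j] = 0.0;
    for (unsigned int z = rowptr[i]; z < rowptr[i+1]; ++z) {
      const double a = val[z];
      const double *b = B + (size_t)colidx[z]*n; /* row colidx[z] of B */
      for (int j = 0; j < n; ++j) C[(size_t)i*n + j] += a * b[j];
    }
  }
}

int main(void)
{
  /* A = [[2,0],[1,3]] in CSR and B = I, so C must reproduce A */
  const unsigned int rowptr[] = {0, 1, 3};
  const unsigned int colidx[] = {0, 0, 1};
  const double val[] = {2.0, 1.0, 3.0};
  const double B[] = {1.0, 0.0, 0.0, 1.0};
  double C[4];
  csr_spmm(2, 2, rowptr, colidx, val, B, C);
  printf("%g %g / %g %g\n", C[0], C[1], C[2], C[3]);
  return 0;
}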
setup.h
//////////////////////////////////////////////////////////////////////////////////
//                                                                              //
//  trueke                                                                      //
//  A multi-GPU implementation of the exchange Monte Carlo method.              //
//                                                                              //
//////////////////////////////////////////////////////////////////////////////////
//                                                                              //
//  Copyright © 2015 Cristobal A. Navarro, Wei Huang.                           //
//                                                                              //
//  This file is part of trueke.                                                //
//  trueke is free software: you can redistribute it and/or modify              //
//  it under the terms of the GNU General Public License as published by        //
//  the Free Software Foundation, either version 3 of the License, or           //
//  (at your option) any later version.                                         //
//                                                                              //
//  trueke is distributed in the hope that it will be useful,                   //
//  but WITHOUT ANY WARRANTY; without even the implied warranty of              //
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the                //
//  GNU General Public License for more details.                               //
//                                                                              //
//  You should have received a copy of the GNU General Public License           //
//  along with trueke. If not, see <http://www.gnu.org/licenses/>.              //
//                                                                              //
//////////////////////////////////////////////////////////////////////////////////
#ifndef _SETUP_H_
#define _SETUP_H_

/* function declarations */
void pickgpus(setup_t *s);
void init(setup_t *s, int argc, char **argv);
void adapt_init(setup_t *s, int argc, char **argv);
void printparams(setup_t *s);
void getparams(setup_t *s, int argc, char **argv);
void newseed(int *seed);
void malloc_arrays(setup_t *s);
void adapt_malloc_arrays(setup_t *s);
void reset(setup_t *s);
void adjustparams(setup_t *s);

/* adapt init */
void adapt_init(setup_t *s, int argc, char **argv){
    printf("adapt_init....{\n"); fflush(stdout);
    /* get parameters */
    getparams(s, argc, argv);
    /* adjust some parameters related to memory pool and active replicas */
    adjustparams(s);
#ifdef MEASURE
    /* folders for output */
    s->obsfolder = "data";
    s->plotfolder = "plots";
    make_output_folders(s->obsfolder, s->plotfolder);
#endif
    /* parameter seed or random seed */
    if(s->seed != 0){
        gpu_pcg32_srandom_r(&s->hpcgs, &s->hpcgi, s->seed, 1);
    }
    else{
        gpu_pcg32_srandom_r(&s->hpcgs, &s->hpcgi, devseed(), 1);
    }
    s->seed = gpu_pcg32_random_r(&s->hpcgs, &s->hpcgi);
    /* pick the GPUs */
    pickgpus(s);
    /* set the number of threads as the number of GPUs */
    omp_set_num_threads(s->ngpus);
    /* build the space of computation for the lattices */
    s->mcblock = dim3(BX, BY / 2, BZ);
    s->mcgrid = dim3((s->L + BX - 1) / BX, (s->L + BY - 1) / (2 * BY), (s->L + BZ - 1) / BZ);
    s->lblock = dim3(BLOCKSIZE1D, 1, 1);
    s->lgrid = dim3((s->N + BLOCKSIZE1D - 1) / BLOCKSIZE1D, 1, 1);
    /* build the space of computation for random numbers and lattice simulation */
    s->prng_block = dim3(BLOCKSIZE1D, 1, 1);
    s->prng_grid = dim3(((s->N / 4) + BLOCKSIZE1D - 1) / BLOCKSIZE1D, 1, 1);
    /* allocate main arrays */
    adapt_malloc_arrays(s);
    /* create timers */
    sdkCreateTimer(&(s->timer));
    sdkCreateTimer(&(s->gtimer));
    sdkCreateTimer(&(s->ktimer));
    /* reset timers */
    sdkResetTimer(&(s->timer));
    sdkResetTimer(&(s->gtimer));
    sdkResetTimer(&(s->ktimer));
    /* print parameters */
    printparams(s);
    //printf("}:ok\n\n"); fflush(stdout);
}

/* adapt malloc */
void adapt_malloc_arrays( setup_t *s ){
    /* multi-gpu adaptation arrays */
    s->mdlat = (int***) malloc(sizeof(int**) * s->ngpus);
    s->aex = (float**)malloc(sizeof(float*)*s->ngpus);
    s->aavex = (float**)malloc(sizeof(float*)*s->ngpus);
    s->aexE = (float**)malloc(sizeof(float*)*s->ngpus);
    s->arstream = (cudaStream_t**)malloc(sizeof(cudaStream_t*) * s->ngpus);
    s->apcga = (uint64_t***)malloc(sizeof(uint64_t**) * s->ngpus);
    s->apcgb =
(uint64_t***)malloc(sizeof(uint64_t**) * s->ngpus); s->dH = (int **)malloc(sizeof(int*) * s->ngpus); s->dE = (float**)malloc(sizeof(float*) * s->ngpus); s->arts = (findex_t**)malloc(sizeof(findex_t*) * s->ngpus); s->atrs = (findex_t**)malloc(sizeof(findex_t*) * s->ngpus); s->aT = (float**)malloc(sizeof(float*)*s->ngpus); /* T is a sorted temp array */ s->T = (float*)malloc(sizeof(float)*s->Ra); /* host values for each replica */ s->E = (float*)malloc(sizeof(float)*s->Ra); // memory for H array s->hH = (int*)malloc(sizeof(int) * s->N); /* multi-GPU setup */ #pragma omp parallel { int tid, nt, r; /* set threads */ adapt_threadset(s, &tid, &nt, &r); //printf("arge malloc: tid=%i r=%i rpool = %i\n", tid, r, s->rpool[tid]); fflush(stdout); /* allocate the replica pool for each GPU */ s->mdlat[tid] = (int**) malloc(sizeof(int *) * s->rpool[tid]); /* ex is a per temperature counter array */ s->aex[tid] = (float*)malloc(sizeof(float)*s->rpool[tid]); /* avex is a per temperature counter array */ s->aavex[tid] = (float*)malloc(sizeof(float)*s->rpool[tid]); /* exchange energies */ s->aexE[tid] = (float*)malloc(sizeof(float) * s->rpool[tid]); /* CUDA streams */ s->arstream[tid] = (cudaStream_t*)malloc(sizeof(cudaStream_t) * s->rpool[tid]); /* PRNG states volume, one state per thread */ s->apcga[tid] = (uint64_t**)malloc(sizeof(uint64_t*) * s->rpool[tid]); s->apcgb[tid] = (uint64_t**)malloc(sizeof(uint64_t*) * s->rpool[tid]); /* fragmented indices for replicas temperature sorted */ s->arts[tid] = (findex_t*)malloc(sizeof(findex_t)*s->rpool[tid]); /* fragmented indices for temperatures replica sorted */ s->atrs[tid] = (findex_t*)malloc(sizeof(findex_t)*s->rpool[tid]); /* fragmented temperatures sorted */ s->aT[tid] = (float*)malloc(sizeof(float)*s->rpool[tid]); /* malloc device magnetic field -- multi-GPU */ checkCudaErrors(cudaMalloc(&(s->dH[tid]), sizeof(int)*s->N)); /* malloc device energy reductions -- multi-GPU*/ checkCudaErrors(cudaMalloc(&(s->dE[tid]), sizeof(float)*s->rpool[tid])); /* malloc the data for 'r' replicas on each GPU */ for(int k = 0; k < s->rpool[tid]; ++k){ checkCudaErrors(cudaMalloc(&(s->mdlat[tid][k]), sizeof(int) * s->N)); checkCudaErrors(cudaMalloc(&(s->apcga[tid][k]), (s->N/4) * sizeof(uint64_t))); checkCudaErrors(cudaMalloc(&(s->apcgb[tid][k]), (s->N/4) * sizeof(uint64_t))); checkCudaErrors(cudaStreamCreateWithFlags(&(s->arstream[tid][k]), cudaStreamNonBlocking)); // offset and sequence approach kernel_gpupcg_setup<<<s->prng_grid, s->prng_block, 0, s->arstream[tid][k] >>>(s->apcga[tid][k], s->apcgb[tid][k], s->N/4, s->seed + (unsigned long long)(s->N/4 * (s->rpool[tid]*tid + k)), (s->rpool[tid]*tid + k)); //printf("tid=%i N=%i N/4 = %i R = %i seed = %lu k = %lu \n", tid, s->N, s->N/4, s->R, s->seed + (unsigned long long)(s->N/4 * (s->rpool[tid]*tid + k)), (s->rpool[tid]*tid + k)); //getchar(); // skip ahead approach //kernel_gpupcg_setup_offset<<<s->prng_grid, s->prng_block, 0, s->arstream[tid][k] >>>(s->apcga[tid][k], s->apcgb[tid][k], s->N/4, s->seed, (unsigned long long)((s->ms * s->pts + s->ds)*4*s->realizations), (s->L^3)/4 * (s->R/s->ngpus * tid + k) ); cudaCheckErrors("kernel: prng reset"); } } /* host memory setup for each replica */ for(int i = 0; i < s->R; i++){ /* array of temperatures increasing order */ s->T[i] = s->TR - (s->R-1 - i)*s->dT; } int count = 0; for(int k = 0; k < s->ngpus; ++k){ for(int j = 0; j < s->gpur[k]; ++j){ s->arts[k][j] = s->atrs[k][j] = (findex_t){k, j}; s->aT[k][j] = s->TR - (float)(s->R-1 - count)*s->dT; s->aex[k][j] = 0; ++count; } } } 
/* set parameters */ void adjustparams(setup_t *s){ /* total number of spins per replica */ s->N = (s->L)*(s->L)*(s->L); /* shared memory steps */ s->cs = BLOCK_STEPS; /* keep original parameter R */ s->Ro = s->R; /* adjust R to a multiple of ngpus; R' = ceil(R/ngpus) *ngpus */ s->R = (int)ceil((float)s->R/(float)s->ngpus) * s->ngpus; /* compute Ra to be the final size Ra = R + TL */ s->Ra = s->R + (s->atrials * s->ains); /* set replica pools for each GPU */ s->gpur = (int*)malloc(sizeof(int) * s->ngpus); s->rpool = (int*)malloc(sizeof(int) * s->ngpus); /* measure zone */ if( s->mzone == -1 ){ s->mzone = (int) ((double)s->pts / log2(2.0 + sqrtf((double)s->pts)/(double)s->L) ); } /* last adaptation insert */ s->fam = 0; /* record original seed */ s->oseed = s->seed; for(int i=0; i < s->ngpus; ++i){ /* active replicas per gpu */ s->gpur[i] = s->R / s->ngpus; //printf("s->gpur[%i] = %i\n", i, s->gpur[i]); fflush(stdout); getchar(); /* replica pool per gpu */ s->rpool[i] = s->Ra / s->ngpus; /* place the remainder of replicas */ if( i < (s->Ra % s->ngpus) ){ s->rpool[i] += 1; } } } /* init */ void init(setup_t *s, int argc, char **argv){ /* set the number of threads as the number of GPUs */ //omp_set_num_threads(s->ngpus); //gpu_pcg32_srandom_r(&s->hpcgs, &s->hpcgi, s->seed, 1); // get another seed from master seeder //s->seed = gpu_pcg32_random_r(&s->hpcgs, &s->hpcgi); /* build the space of computation for the lattices */ s->mcblock = dim3(BX, BY/2, BZ); s->mcgrid = dim3((s->L + BX - 1)/BX, (s->L + BY - 1)/(2*BY), (s->L + BZ - 1)/BZ); s->lblock = dim3( BLOCKSIZE1D, 1, 1); s->lgrid = dim3((s->N + BLOCKSIZE1D - 1)/BLOCKSIZE1D, 1, 1); /* build the space of computation for random numbers and lattice simulation */ s->prng_block = dim3(BLOCKSIZE1D, 1, 1); s->prng_grid = dim3( ((s->N/4) + BLOCKSIZE1D - 1)/BLOCKSIZE1D, 1, 1); /* alocate main arrays */ malloc_arrays(s); /* reset table of obersvables per realization */ #ifdef MEASURE reset_realization_statistics(s, s->R); #endif } /* malloc arrays */ void malloc_arrays( setup_t *s ){ /* allocate the main arrays */ s->hlat = (int **)malloc(sizeof(int *) * s->R); s->dlat = (int **)malloc(sizeof(int *) * s->R); /* T is a sorted temp array */ s->T = (float*)malloc(sizeof(float)*s->R); /* ex is a per temperature counter array */ s->ex = (float*)malloc(sizeof(float)*s->R); /* avex is a per temperature counter array */ s->avex = (float*)malloc(sizeof(float)*s->R); /* index arrays */ s->rts = (int*)malloc(sizeof(int)*s->R); s->trs = (int*)malloc(sizeof(int)*s->R); /* host values for each replica */ s->E = (float*)malloc(sizeof(float)*s->R); s->exE = (float*)malloc(sizeof(float) * s->R); s->M = (int*)malloc(sizeof(int)*s->R); s->F1 = (float3*)malloc(sizeof(float3)*s->R); s->F2 = (float3*)malloc(sizeof(float3)*s->R); /* CUDA streams */ s->rstream = (cudaStream_t*)malloc(sizeof(cudaStream_t) * s->R); /* PRNG states volume, one state per thread */ s->pcga = (uint64_t **)malloc(sizeof(uint64_t *) * s->R); s->pcgb = (uint64_t **)malloc(sizeof(uint64_t *) * s->R); /* observables table */ s->obstable = (obset_t*)malloc(sizeof(obset_t)*s->R); // memory for H array s->hH = (int*)malloc(sizeof(int) * s->N); /* global index of the first replica in each GPU */ /* a copy of the magnetic field 'dH' on each GPU */ s->dH = (int **)malloc(sizeof(int*) * s->ngpus); /* device values for GPUs */ s->dE = (float**)malloc(sizeof(float*) * s->ngpus); s->dM = (int**)malloc(sizeof(int*) * s->ngpus); s->dF1 = (float3**)malloc(sizeof(float3*) * s->ngpus); s->dF2 = 
(float3**)malloc(sizeof(float3*) * s->ngpus); /* multi-GPU setup */ #pragma omp parallel { int tid, nt, r, k; /* set threads */ threadset(s, &tid, &nt, &r); /* malloc the data for 'r' replicas on each GPU */ for(int j = 0; j < r; ++j){ k = tid * r + j; checkCudaErrors(cudaMalloc(&(s->dlat[k]), sizeof(int) * s->N)); checkCudaErrors(cudaMalloc(&(s->pcga[k]), (s->N/4) * sizeof(uint64_t))); checkCudaErrors(cudaMalloc(&(s->pcgb[k]), (s->N/4) * sizeof(uint64_t))); checkCudaErrors(cudaStreamCreateWithFlags(&(s->rstream[k]), cudaStreamNonBlocking)); kernel_gpupcg_setup<<<s->prng_grid, s->prng_block, 0, s->rstream[k] >>>(s->pcga[k], s->pcgb[k], s->N/4, s->seed + s->N/4 * k, k); //printf("thread %i, N=%i N/4 = %i R = %i ngpus = %i R/ngpus = %i k = %i kN/4 = %i seed = %lu \n", tid, s->N, s->N/4, s->R, s->ngpus, s->R/s->ngpus, k, s->N/4 * k, s->seed + s->N/4*k); //getchar(); //cudaDeviceSynchronize(); cudaCheckErrors("kernel: prng reset"); } /* malloc device magnetic field -- multi-GPU */ checkCudaErrors(cudaMalloc(&(s->dH[tid]), sizeof(int)*s->N)); /* malloc device energy reductions -- multi-GPU*/ checkCudaErrors(cudaMalloc(&(s->dE[tid]), sizeof(float)*r)); checkCudaErrors(cudaMalloc(&(s->dM[tid]), sizeof(int)*r)); checkCudaErrors(cudaMalloc(&(s->dF1[tid]), sizeof(float3)*r)); checkCudaErrors(cudaMalloc(&(s->dF2[tid]), sizeof(float3)*r)); /* P2P memory access is not working properly, for the moment just use standard device-host-device transfers */ /* enable peer to peer memory access between GPUs */ //if(tid != 0){ //int access; //printf("\tGPU%i PeerAccess to GPU%i.....", s->gpus[tid].i, s->gpus[0].i); fflush(stdout); //checkCudaErrors(cudaDeviceCanAccessPeer(&access, s->gpus[tid].i, s->gpus[0].i)); //printf("%i\n", access); fflush(stdout); //checkCudaErrors(cudaDeviceEnablePeerAccess( s->gpus[0].i, 0 )); //} //else{ //checkCudaErrors(cudaDeviceEnablePeerAccess( s->gpus[1].i, 0 )); //} } /* host memory setup for each replica */ for(int i = 0; i < s->R; i++){ /* replica allocation */ s->hlat[i]= (int*)malloc(sizeof(int) * s->N); /* array of temperatures increasing order */ s->T[i] = s->TR - (s->R-1 - i)*s->dT; /* exchange counters initialization */ s->ex[i] = 0; /* initialize index arrays */ s->rts[i] = s->trs[i] = i; } int count = 0; /* flatten the temperatures */ for(int i=0; i<s->ngpus; ++i){ for(int j=0; j<s->gpur[i]; ++j){ s->T[count++] = s->aT[i][j]; } } printarray<float>(s->T, s->R, "T"); printf("\n"); } /* pick the idlest 'n' gpus */ void pickgpus( setup_t *s ){ /* structs for handling GPU queries error codes */ nvmlReturn_t r; /* some function variables */ unsigned int devcount, i, u; /* struct with GPU information */ gpu_t *gpus; char version[80]; /* init nvml library for GPU queries */ r = nvmlInit(); nvml_check(r, "nvmlInit"); /* nvml: get driver version */ r = nvmlSystemGetDriverVersion(version, 80); nvml_check(r, "nvmlSystemGetDriverVersion"); printf("\n\tDriver version: %s \n", version); /* get number of devices */ r = nvmlDeviceGetCount(&devcount); nvml_check(r, "nvmlDeviceGetCount"); printf("\tMAXGPUS = %d\n", devcount); /* malloc one gpu_t struct for each device */ gpus = (gpu_t*)malloc(sizeof(gpu_t)*devcount); /* return error if n > devcount */ if( s->ngpus > devcount){ fprintf(stderr, "pt error: [g = %i] > [MAXGPUS = %i]. 
(try g <= MAXGPUS)\n", s->ngpus, devcount); exit(1); } /* get the information of each GPU */ printf("\tListing devices:\n"); for(i = 0; i < devcount; i++){ unsigned int index; nvmlDevice_t dev; char name[64]; char uuid[128]; //nvmlComputeMode_t compute_mode; nvmlUtilization_t util; r = nvmlDeviceGetHandleByIndex(i, &dev); nvml_check(r, "nvmlDeviceGetHandleByIndex"); r = nvmlDeviceGetName(dev, name, sizeof(name)/sizeof(name[0])); nvml_check(r, "nvmlDeviceGetName"); r = nvmlDeviceGetIndex(dev, &index); r = nvmlDeviceGetUUID(dev, uuid, 128); printf("\t\tGPU%d %s, index=%i, UUID=%s", i, name, index, uuid); r = nvmlDeviceGetUtilizationRates(dev, &util); u = nvml_check(r, "nvmlDeviceGetUtilizationRates"); if(u){ printf(" -> util = %i%%\n", util.gpu); gpus[i].i = index; gpus[i].u = util.gpu; gpus[i].m = util.memory; } else{ gpus[i].i = i; } } if(u){ //printf("not sorted\n"); //for(i = 0; i < devcount; i++) // printf("gpu[%i] = (i,u,m) ---> (%i, %i, %i)\n", i, gpus[i].i, gpus[i].u, gpus[i].m); //printf("sorted\n"); qsort(gpus, devcount, sizeof(gpu), compgpu); //for(i = 0; i < devcount; i++) // printf("gpu[%i] = (i,u,m) ---> (%i, %i, %i)\n", i, gpus[i].i, gpus[i].u, gpus[i].m); } /* malloc info for 'n' GPUs */ s->gpus = (gpu_t*)malloc(sizeof(gpu_t)*s->ngpus); printf("\tchosen GPU(s) = {"); for(i = 0; i < s->ngpus; i++){ s->gpus[i] = gpus[i]; printf(" GPU%i", s->gpus[i].i); } printf(" }\n"); /* shutdown the nvml library */ r = nvmlShutdown(); nvml_check(r, "nvmlShutdown"); /* free the auxiliary gpu_t array */ free(gpus); } /* print parameters */ void printparams(setup_t *s){ printf("\tparameters:{\n"); printf("\t\tL: %i\n", s->L); printf("\t\tvolume: %i\n", s->N); printf("\t\t[TR,dT]: [%f, %f]\n", s->TR, s->dT); printf("\t\t[atrials, ains, apts, ams]: [%i, %i, %i, %i]\n", s->atrials, s->ains, s->apts, s->ams); printf("\t\tmag_field h: %f\n", s->h); printf("\t\treplicas: %i\n", s->R); printf("\t\tptsteps: %i\n", s->pts); printf("\t\tmzone: %i\n", s->mzone); printf("\t\tdrop_steps: %i\n", s->ds); printf("\t\tmcsteps: %i\n", s->ms); printf("\t\tmeasure: %i\n", s->fs); printf("\t\tperiod: %i\n", s->period); printf("\t\tnblocks: %i\n", s->blocks); printf("\t\trealizations: %i\n", s->realizations); printf("\t\tseed: %lu\n", s->seed); printf("\t\tmicrosteps: %i\n", s->cs); printf("\t\tNGPUS: %i\n\t}\n", s->ngpus); /* print space of computation */ printf("\tsoc{\n\t\tmcgrid is %i x %i x %i mcblock %i x %i x %i\n\t\tlgrid is %i x %i x %i lblock %i x %i x %i \n\t}\n", s->mcgrid.x, s->mcgrid.y, s->mcgrid.z, s->mcblock.x, s->mcblock.y, s->mcblock.z, s->lgrid.x, s->lgrid.y, s->lgrid.z, s->lblock.x, s->lblock.y, s->lblock.z); } /* get parameters */ void getparams(setup_t *s, int argc, char **argv){ /* if the number or arguments is not correct, stop the program */ if(argc != 28){ printf("run as:\n./bin/trueke -l <L> <R> -t <T> <dT> -a <tri> <ins> <pts> <ms> -h <h> -s <pts> <mz> <eq> <ms> <meas> <per> -br <b> <r> -z <seed> -g <x>\n"); exit(1); } else{ for(int i=0; i<argc; i++){ /* lattice size and number of replicas */ if(strcmp(argv[i],"-l") == 0){ s->L = atoi(argv[i+1]); s->R = atoi(argv[i+2]); } /* get TR and dT */ else if(strcmp(argv[i],"-t") == 0){ s->TR = atof(argv[i+1]); s->dT = atof(argv[i+2]); } /* the magnetic field constant */ else if(strcmp(argv[i],"-h") == 0){ s->h = atof(argv[i+1]); } /* ptsteps, drop steps, mc steps, final steps */ else if(strcmp(argv[i],"-s") == 0){ s->pts = atof(argv[i+1]); s->mzone = atoi(argv[i+2]); s->ds = atof(argv[i+3]); s->ms = atof(argv[i+4]); s->fs = atof(argv[i+5]); 
s->period = atof(argv[i+6]); } /* number of measure blocks and realizations */ else if(strcmp(argv[i],"-br") == 0){ s->blocks = atof(argv[i+1]); s->realizations = atof(argv[i+2]); } /* adaptive dt parameters */ else if(strcmp(argv[i], "-a") == 0){ s->atrials = atoi(argv[i+1]); s->ains = atoi(argv[i+2]); s->apts = atoi(argv[i+3]); s->ams = atoi(argv[i+4]); } /* number of gpus */ else if(strcmp(argv[i],"-g") == 0){ s->ngpus = atoi(argv[i+1]); } /* seed, (pass 0 for /dev/urandom) */ else if(strcmp(argv[i],"-z") == 0){ s->seed = atoi(argv[i+1]); } } } /* the lattice kernels assume L is a multiple of 32 */ if( (s->L % 32) != 0 ){ fprintf(stderr, "lattice dimensional size must be a multiple of 32\n"); exit(1); } } #endif
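/*
 * Note: the compgpu comparator used by the qsort call in pickgpus() is
 * defined elsewhere in the project. A minimal sketch of what such a qsort
 * comparator could look like, assuming only the gpu_t fields u (GPU
 * utilization) and m (memory utilization) referenced above, so that the
 * idlest devices sort first; the project's actual ordering may differ:
 */
#if 0
static int compgpu(const void *a, const void *b)
{
	const gpu_t *ga = (const gpu_t *)a;
	const gpu_t *gb = (const gpu_t *)b;
	/* ascending GPU utilization, memory utilization breaks ties */
	if (ga->u != gb->u)
		return (ga->u > gb->u) - (ga->u < gb->u);
	return (ga->m > gb->m) - (ga->m < gb->m);
}
#endif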
no_omp_cpu.c
/* * Copyright (c) 2015, 2016, 2017, 2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define _GNU_SOURCE #include <stdlib.h> #include <stdio.h> #include <unistd.h> #include <sched.h> #include <assert.h> void no_omp_cpu(int num_cpu, cpu_set_t *no_omp) { int cpu_index, i; for (i = 0; i < num_cpu; ++i) { CPU_SET(i, no_omp); } #pragma omp parallel default(shared) { #pragma omp critical { cpu_index = sched_getcpu(); assert(cpu_index < num_cpu); CPU_CLR(cpu_index, no_omp); } /* end pragma omp critical */ } /* end pragma omp parallel */ } int main(int argc, char **argv) { int i, num_cpu = sysconf(_SC_NPROCESSORS_ONLN); cpu_set_t *no_omp = CPU_ALLOC(num_cpu); no_omp_cpu(num_cpu, no_omp); printf("Free cpu list: "); for (i = 0; i < num_cpu; ++i) { if (CPU_ISSET(i, no_omp)) { printf("%i ", i); } } printf("\n\n"); CPU_FREE(no_omp); return 0; }
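/*
 * Illustrative follow-up (not part of the original program): the free-CPU
 * set computed by no_omp_cpu() is typically consumed by pinning auxiliary
 * work away from the OpenMP team. A minimal sketch, assuming the Linux
 * sched_setaffinity() API and a set allocated with CPU_ALLOC(num_cpu);
 * the function name is hypothetical:
 */
#if 0
static void pin_to_free_cpus(int num_cpu, cpu_set_t *no_omp)
{
	/* bind the calling thread (pid 0 means "self") to the CPUs that no
	 * OpenMP thread reported via sched_getcpu() above */
	if (sched_setaffinity(0, CPU_ALLOC_SIZE(num_cpu), no_omp) != 0) {
		perror("sched_setaffinity");
	}
}
#endif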
oldoffice_fmt_plug.c
/* * MS Office 97-2003 cracker patch for JtR. Hacked together during May of * 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>. * * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com> * Copyright (c) 2014, magnum * Copyright (c) 2009, David Leblanc (http://offcrypto.codeplex.com/) * * License: Microsoft Public License (MS-PL) */ #if FMT_EXTERNS_H extern struct fmt_main fmt_oldoffice; #elif FMT_REGISTERS_H john_register_one(&fmt_oldoffice); #else #include <stdint.h> #include <string.h> #include <errno.h> #ifdef _OPENMP #include <omp.h> #endif #include "md5.h" #include "rc4.h" #include "sha.h" #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "unicode.h" #include "dyna_salt.h" #include "memdbg.h" #ifndef OMP_SCALE #define OMP_SCALE 256 #endif #define FORMAT_LABEL "oldoffice" #define FORMAT_NAME "MS Office <= 2003" #define ALGORITHM_NAME "MD5/SHA1 RC4 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1000 #define PLAINTEXT_LENGTH 64 #define BINARY_SIZE 0 #define BINARY_ALIGN MEM_ALIGN_NONE #define SALT_SIZE sizeof(dyna_salt*) #define SALT_ALIGN MEM_ALIGN_WORD #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define CIPHERTEXT_LENGTH (TAG_LEN + 120) #define FORMAT_TAG "$oldoffice$" #define TAG_LEN (sizeof(FORMAT_TAG) - 1) static struct fmt_tests oo_tests[] = { {"$oldoffice$1*de17a7f3c3ff03a39937ba9666d6e952*2374d5b6ce7449f57c9f252f9f9b53d2*e60e1185f7aecedba262f869c0236f81", "test"}, {"$oldoffice$0*e40b4fdade5be6be329c4238e2099b8a*259590322b55f7a3c38cb96b5864e72d*2e6516bfaf981770fe6819a34998295d", "123456789012345"}, {"$oldoffice$4*163ae8c43577b94902f58d0106b29205*87deff24175c2414cb1b2abdd30855a3*4182446a527fe4648dffa792d55ae7a15edfc4fb", "Google123"}, /* Meet-in-the-middle candidate produced with hashcat -m9710 */ /* Real pw is "hashcat", one collision is "zvDtu!" 
*/ {"", "zvDtu!", {"", "$oldoffice$1*d6aabb63363188b9b73a88efb9c9152e*afbbb9254764273f8f4fad9a5d82981f*6f09fd2eafc4ade522b5f2bee0eaf66d","f2ab1219ae"} }, #if PLAINTEXT_LENGTH >= 24 /* 2003-RC4-40bit-MS-Base-Crypto-1.0_myhovercraftisfullofeels_.doc */ {"$oldoffice$3*9f32522fe9bcb69b12f39d3c24b39b2f*fac8b91a8a578468ae7001df4947558f*f2e267a5bea45736b52d6d1051eca1b935eabf3a", "myhovercraftisfullofeels"}, /* Test-RC4-40bit-MS-Base-DSS_myhovercraftisfullofeels_.doc */ {"$oldoffice$3*095b777a73a10fb6bcd3e48d50f8f8c5*36902daab0d0f38f587a84b24bd40dce*25db453f79e8cbe4da1844822b88f6ce18a5edd2", "myhovercraftisfullofeels"}, /* 2003-RC4-40bit-MS-Base-DH-SChan_myhovercraftisfullofeels_.doc */ {"$oldoffice$3*284bc91cb64bc847a7a44bc7bf34fb69*1f8c589c6fcbd43c42b2bc6fff4fd12b*2bc7d8e866c9ea40526d3c0a59e2d37d8ded3550", "myhovercraftisfullofeels"}, /* Test-RC4-128bit-MS-Strong-Crypto_myhovercraftisfullofeels_.doc */ {"$oldoffice$4*a58b39c30a06832ee664c1db48d17304*986a45cc9e17e062f05ceec37ec0db17*fe0c130ef374088f3fec1979aed4d67459a6eb9a", "myhovercraftisfullofeels"}, /* 2003-RC4-40bit-MS-Base-1.0_myhovercraftisfullofeels_.xls */ {"$oldoffice$3*f426041b2eba9745d30c7949801f7d3a*888b34927e5f31e2703cc4ce86a6fd78*ff66200812fd06c1ba43ec2be9f3390addb20096", "myhovercraftisfullofeels"}, #endif /* the following hash was extracted from Proc2356.ppt (manually + by oldoffice2john.py */ {"$oldoffice$3*DB575DDA2E450AB3DFDF77A2E9B3D4C7*AB183C4C8B5E5DD7B9F3AF8AE5FFF31A*B63594447FAE7D4945D2DAFD113FD8C9F6191BF5", "crypto"}, {"$oldoffice$3*3fbf56a18b026e25815cbea85a16036c*216562ea03b4165b54cfaabe89d36596*91308b40297b7ce31af2e8c57c6407994b205590", "openwall"}, {NULL} }; /* Password encoded in UCS-2 */ static UTF16 (*saved_key)[PLAINTEXT_LENGTH + 1]; /* UCS-2 password length, in octets */ static int *saved_len; /* Last hash with this salt and plain */ static unsigned char (*mitm_key)[16]; static unsigned char (*rc4_key)[16]; static int any_cracked, *cracked; static size_t cracked_size; static int new_keys; typedef struct { dyna_salt dsalt; int type; unsigned char salt[16]; unsigned char verifier[16]; /* or encryptedVerifier */ unsigned char verifierHash[20]; /* or encryptedVerifierHash */ unsigned int has_mitm; unsigned char mitm[5]; /* Meet-in-the-middle hint, if we have one */ } custom_salt; static struct { int ct_hash; unsigned char mitm[10]; } mitm_catcher; static custom_salt cs; static custom_salt *cur_salt = &cs; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t = 1; omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif if (options.target_enc == UTF_8) self->params.plaintext_length = 3 * PLAINTEXT_LENGTH > 125 ? 
125 : 3 * PLAINTEXT_LENGTH; saved_key = mem_alloc(self->params.max_keys_per_crypt * sizeof(*saved_key)); saved_len = mem_alloc(self->params.max_keys_per_crypt * sizeof(*saved_len)); mitm_key = mem_alloc(self->params.max_keys_per_crypt * sizeof(*mitm_key)); rc4_key = mem_alloc(self->params.max_keys_per_crypt * sizeof(*rc4_key)); any_cracked = 0; cracked_size = sizeof(*cracked) * self->params.max_keys_per_crypt; cracked = mem_calloc(1, cracked_size); } static void done(void) { MEM_FREE(cracked); MEM_FREE(rc4_key); MEM_FREE(mitm_key); MEM_FREE(saved_len); MEM_FREE(saved_key); } /* Based on ldr_cracked_hash from loader.c */ #define HASH_LOG 30 #define HASH_SIZE (1 << HASH_LOG) static int hex_hash(char *ciphertext) { unsigned int hash, extra; unsigned char *p = (unsigned char *)ciphertext; hash = p[0] | 0x20; /* ASCII case insensitive */ if (!hash) goto out; extra = p[1] | 0x20; if (!extra) goto out; p += 2; while (*p) { hash <<= 1; extra <<= 1; hash += p[0] | 0x20; if (!p[1]) break; extra += p[1] | 0x20; p += 2; if (hash & 0xe0000000) { hash ^= hash >> HASH_LOG; extra ^= extra >> (HASH_LOG - 1); hash &= HASH_SIZE - 1; } } hash -= extra; hash ^= extra << (HASH_LOG / 2); hash ^= hash >> HASH_LOG; hash &= HASH_SIZE - 1; out: return hash; } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy, *ptr, *keeptr; int type, extra; if (strncmp(ciphertext, FORMAT_TAG, TAG_LEN)) return 0; if (strlen(ciphertext) > CIPHERTEXT_LENGTH) return 0; if (!(ctcopy = strdup(ciphertext))) return 0; keeptr = ctcopy; ctcopy += TAG_LEN; if (!(ptr = strtokm(ctcopy, "*"))) /* type */ goto error; type = atoi(ptr); if (type < 0 || type > 4) goto error; if (!(ptr = strtokm(NULL, "*"))) /* salt */ goto error; if (hexlen(ptr, &extra) != 32 || extra) goto error; if (!(ptr = strtokm(NULL, "*"))) /* verifier */ goto error; if (hexlen(ptr, &extra) != 32 || extra) goto error; if (!(ptr = strtokm(NULL, "*"))) /* verifier hash */ goto error; if (type < 3 && (hexlen(ptr, &extra) != 32 || extra)) goto error; else if (type >= 3 && (hexlen(ptr, &extra) != 40 || extra)) goto error; /* * Deprecated field: mitm hash (40-bit RC4). The new way to put it is in the * uid field, like hashcat's example hash. 
*/ if (type <= 3 && (ptr = strtokm(NULL, "*"))) { if (hexlen(ptr, &extra) != 10 || extra) goto error; } MEM_FREE(keeptr); return 1; error: MEM_FREE(keeptr); return 0; } /* uid field may contain a meet-in-the-middle hash */ static char *prepare(char *split_fields[10], struct fmt_main *self) { if (split_fields[0] && valid(split_fields[0], self) && split_fields[1] && hexlen(split_fields[1], 0) == 10) { mitm_catcher.ct_hash = hex_hash(split_fields[0]); memcpy(mitm_catcher.mitm, split_fields[1], 10); return split_fields[0]; } else if (valid(split_fields[1], self) && split_fields[2] && hexlen(split_fields[2], 0) == 10) { mitm_catcher.ct_hash = hex_hash(split_fields[1]); memcpy(mitm_catcher.mitm, split_fields[2], 10); } return split_fields[1]; } static char *split(char *ciphertext, int index, struct fmt_main *self) { static char out[CIPHERTEXT_LENGTH]; char *p; int extra; strnzcpy(out, ciphertext, sizeof(out)); strlwr(out); /* Drop legacy embedded MITM hash */ if ((p = strrchr(out, '*')) && (hexlen(&p[1], &extra) == 10 || extra)) *p = 0; return out; } static void *get_salt(char *ciphertext) { static void *ptr; char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; char *p; int i; memset(&cs, 0, sizeof(cs)); ctcopy += TAG_LEN; /* skip over "$oldoffice$" */ p = strtokm(ctcopy, "*"); cs.type = atoi(p); p = strtokm(NULL, "*"); for (i = 0; i < 16; i++) cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtokm(NULL, "*"); for (i = 0; i < 16; i++) cs.verifier[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtokm(NULL, "*"); if (cs.type < 3) { for (i = 0; i < 16; i++) cs.verifierHash[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; } else { for (i = 0; i < 20; i++) cs.verifierHash[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; } if ((p = strtokm(NULL, "*"))) { /* Deprecated field */ cs.has_mitm = 1; for (i = 0; i < 5; i++) cs.mitm[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; } else if (hex_hash(ciphertext) == mitm_catcher.ct_hash) { cs.has_mitm = 1; for (i = 0; i < 5; i++) cs.mitm[i] = atoi16[ARCH_INDEX(mitm_catcher.mitm[i * 2])] * 16 + atoi16[ARCH_INDEX(mitm_catcher.mitm[i * 2 + 1])]; } else cs.has_mitm = 0; MEM_FREE(keeptr); cs.dsalt.salt_cmp_offset = SALT_CMP_OFF(custom_salt, type); cs.dsalt.salt_cmp_size = SALT_CMP_SIZE(custom_salt, type, has_mitm, 0); cs.dsalt.salt_alloc_needs_free = 0; ptr = mem_alloc_copy(&cs, sizeof(custom_salt), MEM_ALIGN_WORD); return &ptr; } static void set_salt(void *salt) { if (memcmp(cur_salt->salt, (*(custom_salt**)salt)->salt, 16)) new_keys = 1; cur_salt = *(custom_salt**)salt; } static int salt_compare(const void *x, const void *y) { int c; c = memcmp((*(custom_salt**)x)->salt, (*(custom_salt**)y)->salt, 16); if (c) return c; c = dyna_salt_cmp((void*)x, (void*)y, SALT_SIZE); return c; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; if (any_cracked) { memset(cracked, 0, cracked_size); any_cracked = 0; } #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index++) { int i; RC4_KEY key; if (cur_salt->type < 3) { MD5_CTX ctx; unsigned char pwdHash[16]; unsigned char hashBuf[21 * 16]; if (new_keys) { unsigned char key_hash[16]; MD5_Init(&ctx); MD5_Update(&ctx, saved_key[index], saved_len[index]); MD5_Final(key_hash, &ctx); for (i = 0; i < 16; i++) { memcpy(hashBuf + i * 21, key_hash, 5); memcpy(hashBuf + i * 21 + 5, cur_salt->salt, 16); } 
MD5_Init(&ctx); MD5_Update(&ctx, hashBuf, 21 * 16); MD5_Final(mitm_key[index], &ctx); } // Early reject if we got a hint if (cur_salt->has_mitm && memcmp(mitm_key[index], cur_salt->mitm, 5)) continue; if (new_keys) { memcpy(hashBuf, mitm_key[index], 5); memset(hashBuf + 5, 0, 4); MD5_Init(&ctx); MD5_Update(&ctx, hashBuf, 9); MD5_Final(rc4_key[index], &ctx); } RC4_set_key(&key, 16, rc4_key[index]); /* rc4Key */ RC4(&key, 16, cur_salt->verifier, hashBuf); /* encryptedVerifier */ RC4(&key, 16, cur_salt->verifierHash, hashBuf + 16); /* encryptedVerifierHash */ /* hash the decrypted verifier */ MD5_Init(&ctx); MD5_Update(&ctx, hashBuf, 16); MD5_Final(pwdHash, &ctx); if (!memcmp(pwdHash, hashBuf + 16, 16)) #ifdef _OPENMP #pragma omp critical #endif { any_cracked = cracked[index] = 1; cur_salt->has_mitm = 1; memcpy(cur_salt->mitm, mitm_key[index], 5); } } else { SHA_CTX ctx; unsigned char H0[24]; unsigned char Hfinal[20]; unsigned char DecryptedVerifier[16]; unsigned char DecryptedVerifierHash[20]; if (new_keys) { unsigned char key_hash[20]; SHA1_Init(&ctx); SHA1_Update(&ctx, cur_salt->salt, 16); SHA1_Update(&ctx, saved_key[index], saved_len[index]); SHA1_Final(H0, &ctx); memset(&H0[20], 0, 4); SHA1_Init(&ctx); SHA1_Update(&ctx, H0, 24); SHA1_Final(key_hash, &ctx); if (cur_salt->type < 4) { memcpy(mitm_key[index], key_hash, 5); memset(&mitm_key[index][5], 0, 11); } else memcpy(mitm_key[index], key_hash, 16); } // Early reject if we got a hint if (cur_salt->has_mitm && memcmp(mitm_key[index], cur_salt->mitm, 5)) continue; RC4_set_key(&key, 16, mitm_key[index]); /* dek */ RC4(&key, 16, cur_salt->verifier, DecryptedVerifier); RC4(&key, 16, cur_salt->verifierHash, DecryptedVerifierHash); SHA1_Init(&ctx); SHA1_Update(&ctx, DecryptedVerifier, 16); SHA1_Final(Hfinal, &ctx); if (!memcmp(Hfinal, DecryptedVerifierHash, 16)) #ifdef _OPENMP #pragma omp critical #endif { any_cracked = cracked[index] = 1; if (cur_salt->type < 4) { cur_salt->has_mitm = 1; memcpy(cur_salt->mitm, mitm_key[index], 5); } } } } new_keys = 0; return count; } static int cmp_all(void *binary, int count) { return any_cracked; } static int cmp_one(void *binary, int index) { return cracked[index]; } static int cmp_exact(char *source, int index) { extern volatile int bench_running; if (cur_salt->type < 4 && !bench_running) { unsigned char *cp, out[11]; int i; cp = cur_salt->mitm; for (i = 0; i < 5; i++) { out[2 * i + 0] = itoa16[*cp >> 4]; out[2 * i + 1] = itoa16[*cp & 0xf]; cp++; } out[10] = 0; fprintf(stderr, "MITM key: %s\n", out); } return 1; } static void set_key(char *key, int index) { /* convert key to UTF-16LE */ saved_len[index] = enc_to_utf16(saved_key[index], PLAINTEXT_LENGTH, (UTF8*)key, strlen(key)); if (saved_len[index] < 0) saved_len[index] = strlen16(saved_key[index]); saved_len[index] <<= 1; new_keys = 1; } static char *get_key(int index) { return (char*)utf16_to_enc(saved_key[index]); } static unsigned int oo_hash_type(void *salt) { custom_salt *my_salt; my_salt = *(custom_salt**)salt; return (unsigned int) my_salt->type; } struct fmt_main fmt_oldoffice = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_UNICODE | FMT_UTF8 | FMT_SPLIT_UNIFIES_CASE | FMT_DYNA_SALT, { "hash type", }, { FORMAT_TAG }, oo_tests }, { init, done, fmt_default_reset, prepare, valid, split, fmt_default_binary, get_salt, { oo_hash_type, }, fmt_default_source, { 
fmt_default_binary_hash }, fmt_default_dyna_salt_hash, salt_compare, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
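/*
 * Illustrative sketch (not compiled, not part of the format itself): for
 * type 0/1 hashes, the 40-bit "MITM hint" that crypt_all() checks for its
 * early reject is the first five bytes of the intermediate MD5 computed
 * there. Given the MD5 of the UTF-16LE password and the 16-byte salt, it
 * can be reproduced standalone as below; the function name is hypothetical:
 */
#if 0
static void mitm_hint_md5(const unsigned char pw_md5[16],
                          const unsigned char salt[16],
                          unsigned char hint[5])
{
	MD5_CTX ctx;
	unsigned char buf[21 * 16], digest[16];
	int i;

	/* 16 repetitions of (first 5 bytes of MD5(password) || 16-byte salt) */
	for (i = 0; i < 16; i++) {
		memcpy(buf + i * 21, pw_md5, 5);
		memcpy(buf + i * 21 + 5, salt, 16);
	}
	MD5_Init(&ctx);
	MD5_Update(&ctx, buf, sizeof(buf));
	MD5_Final(digest, &ctx);
	memcpy(hint, digest, 5); /* a mismatch lets crypt_all() skip the RC4 work */
}
#endif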
dtype.h
#pragma once #include <cblas.h> #include <stdlib.h> #include <immintrin.h> #include <faiss/IndexIVF.h> #include <faiss/utils/Heap.h> #ifdef OPT_DTYPE_UTILS namespace faiss { //==================================Conversion================================ inline const float* convert_x_T_impl (size_t, const float* x, float*) { return x; } template <typename T> const T* convert_x_T_impl (size_t d, const float* x, T*) { T* conv_x = new T[d]; for (size_t i = 0; i < d; i++) { conv_x[i] = static_cast<T> (x[i]); } return conv_x; } template <typename T> inline const T* convert_x_T (size_t d, const float* x) { return convert_x_T_impl (d, x, (T*)nullptr); } inline void del_converted_x_T (size_t, const float*) { } template <typename T> inline void del_converted_x_T (size_t, const T* conv_x) { delete[] conv_x; } template <typename T> struct Converter_T { const size_t d; const T* const x; Converter_T (size_t d, const float* x): d (d), x (convert_x_T<T> (d, x)) { } ~Converter_T () { del_converted_x_T (d, x); } }; //==============================Distance Function============================= template <typename Tdis, typename T> Tdis vec_IP_ref_T (const T* x, const T* y, size_t d, Tdis sum = 0) { for (size_t i = 0; i < d; i++) { sum += static_cast<Tdis> (x[i]) * static_cast<Tdis> (y[i]); } return sum; } inline float vec_IP_ref_T (const float* x, const float* y, size_t d) { return vec_IP_ref_T<float> (x, y, d); } inline float vec_IP_ref_T (const bfp16_t* x, const bfp16_t* y, size_t d) { return vec_IP_ref_T<float> (x, y, d); } template <typename Tdis, typename T> Tdis vec_L2Sqr_ref_T (const T* x, const T* y, size_t d, Tdis sum = 0) { for (size_t i = 0; i < d; i++) { Tdis diff = static_cast<Tdis> (x[i]) - static_cast<Tdis> (y[i]); sum += diff * diff; } return sum; } inline float vec_L2Sqr_ref_T (const float* x, const float* y, size_t d) { return vec_L2Sqr_ref_T<float> (x, y, d); } inline float vec_L2Sqr_ref_T (const bfp16_t* x, const bfp16_t* y, size_t d) { return vec_L2Sqr_ref_T<float> (x, y, d); } #ifdef __SSE4_1__ #define USE_SIMD_128 inline __m128 _mm_loadu_ps_T (const float* x) { return _mm_loadu_ps (x); } inline __m128 _mm_loadu_ps_T (const bfp16_t* x) { return _mm_castsi128_ps (_mm_unpacklo_epi16 ( _mm_setzero_si128 (), _mm_loadl_epi64 ((const __m128i*)x))); } template <typename T> float vec_IP_fp_128b_T (const T* x, const T* y, size_t d, __m128 msum = _mm_setzero_ps ()) { while (d >= 4) { __m128 mx = _mm_loadu_ps_T (x); x += 4; __m128 my = _mm_loadu_ps_T (y); y += 4; msum = _mm_add_ps (msum, _mm_mul_ps (mx, my)); d -= 4; } msum = _mm_hadd_ps (msum, msum); msum = _mm_hadd_ps (msum, msum); float sum = _mm_cvtss_f32 (msum); return d == 0 ? sum : vec_IP_ref_T<float> (x, y, d, sum); } inline float vec_IP_128b_T (const float* x, const float* y, size_t d) { return vec_IP_fp_128b_T (x, y, d); } inline float vec_IP_128b_T (const bfp16_t* x, const bfp16_t* y, size_t d) { return vec_IP_fp_128b_T (x, y, d); } template <typename T> float vec_L2Sqr_fp_128b_T (const T* x, const T* y, size_t d, __m128 msum = _mm_setzero_ps ()) { while (d >= 4) { __m128 mx = _mm_loadu_ps_T (x); x += 4; __m128 my = _mm_loadu_ps_T (y); y += 4; __m128 mdiff = _mm_sub_ps (mx, my); msum = _mm_add_ps (msum, _mm_mul_ps (mdiff, mdiff)); d -= 4; } msum = _mm_hadd_ps (msum, msum); msum = _mm_hadd_ps (msum, msum); float sum = _mm_cvtss_f32 (msum); return d == 0 ? 
sum : vec_L2Sqr_ref_T<float> (x, y, d, sum); } inline float vec_L2Sqr_128b_T (const float* x, const float* y, size_t d) { return vec_L2Sqr_fp_128b_T (x, y, d); } inline float vec_L2Sqr_128b_T (const bfp16_t* x, const bfp16_t* y, size_t d) { return vec_L2Sqr_fp_128b_T (x, y, d); } #endif #ifdef __AVX2__ #ifndef USE_SIMD_128 #error "SIMD 256 must have SIMD 128 enabled" #endif #define USE_SIMD_256 inline __m256 _mm256_loadu_ps_T (const float* x) { return _mm256_loadu_ps (x); } inline __m256 _mm256_loadu_ps_T (const bfp16_t* x) { return _mm256_castsi256_ps ( _mm256_unpacklo_epi16 ( _mm256_setzero_si256 (), _mm256_insertf128_si256 ( _mm256_castsi128_si256 ( _mm_loadl_epi64 ((const __m128i*)x)), _mm_loadl_epi64 ((const __m128i*)(x + 4)), 1))); } template <typename T> float vec_IP_fp_256b_T (const T* x, const T* y, size_t d, __m256 msum = _mm256_setzero_ps ()) { while (d >= 8) { __m256 mx = _mm256_loadu_ps_T (x); x += 8; __m256 my = _mm256_loadu_ps_T (y); y += 8; msum = _mm256_add_ps (msum, _mm256_mul_ps (mx, my)); d -= 8; } __m128 msum2 = _mm256_extractf128_ps (msum, 1); msum2 = _mm_add_ps (msum2, _mm256_extractf128_ps (msum, 0)); return vec_IP_fp_128b_T (x, y, d, msum2); } inline float vec_IP_256b_T (const float* x, const float* y, size_t d) { return vec_IP_fp_256b_T (x, y, d); } inline float vec_IP_256b_T (const bfp16_t* x, const bfp16_t* y, size_t d) { return vec_IP_fp_256b_T (x, y, d); } template <typename T> float vec_L2Sqr_fp_256b_T (const T* x, const T* y, size_t d, __m256 msum = _mm256_setzero_ps ()) { while (d >= 8) { __m256 mx = _mm256_loadu_ps_T (x); x += 8; __m256 my = _mm256_loadu_ps_T (y); y += 8; __m256 mdiff = _mm256_sub_ps (mx, my); msum = _mm256_add_ps (msum, _mm256_mul_ps (mdiff, mdiff)); d -= 8; } __m128 msum2 = _mm256_extractf128_ps (msum, 1); msum2 = _mm_add_ps (msum2, _mm256_extractf128_ps (msum, 0)); return vec_L2Sqr_fp_128b_T (x, y, d, msum2); } inline float vec_L2Sqr_256b_T (const float* x, const float* y, size_t d) { return vec_L2Sqr_fp_256b_T (x, y, d); } inline float vec_L2Sqr_256b_T (const bfp16_t* x, const bfp16_t* y, size_t d) { return vec_L2Sqr_fp_256b_T (x, y, d); } #endif #if defined (__AVX512F__) && defined(__AVX512DQ__) && defined(__AVX512BW__) \ && defined(__AVX512VL__) #ifndef USE_SIMD_256 #error "SIMD 512 must have SIMD 256 enabled" #endif #define USE_SIMD_512 inline __m512 _mm512_loadu_ps_T (const float* x) { return _mm512_loadu_ps (x); } inline __m512 _mm512_loadu_ps_T (const bfp16_t* x) { return _mm512_castsi512_ps ( _mm512_unpacklo_epi16 ( _mm512_setzero_si512 (), _mm512_inserti64x4 ( _mm512_castsi256_si512 ( _mm256_inserti32x4 ( _mm256_castsi128_si256 ( _mm_loadl_epi64 ((const __m128i*)x)), _mm_loadl_epi64 ((const __m128i*)(x + 4)), 1)), _mm256_inserti32x4 ( _mm256_castsi128_si256 ( _mm_loadl_epi64 ((const __m128i*)(x + 8))), _mm_loadl_epi64 ((const __m128i*)(x + 12)), 1), 1))); } template <typename T> float vec_IP_fp_512b_T (const T* x, const T* y, size_t d, __m512 msum = _mm512_setzero_ps ()) { while (d >= 16) { __m512 mx = _mm512_loadu_ps_T (x); x += 16; __m512 my = _mm512_loadu_ps_T (y); y += 16; msum = _mm512_add_ps (msum, _mm512_mul_ps (mx, my)); d -= 16; } __m256 msum2 = _mm512_extractf32x8_ps (msum, 1); msum2 = _mm256_add_ps (msum2, _mm512_extractf32x8_ps (msum, 0)); return vec_IP_fp_256b_T (x, y, d, msum2); } inline float vec_IP_512b_T (const float* x, const float* y, size_t d) { return vec_IP_fp_512b_T (x, y, d); } inline float vec_IP_512b_T (const bfp16_t* x, const bfp16_t* y, size_t d) { return vec_IP_fp_512b_T (x, y, d); } template 
<typename T> float vec_L2Sqr_fp_512b_T (const T* x, const T* y, size_t d, __m512 msum = _mm512_setzero_ps ()) { while (d >= 16) { __m512 mx = _mm512_loadu_ps_T (x); x += 16; __m512 my = _mm512_loadu_ps_T (y); y += 16; __m512 mdiff = _mm512_sub_ps (mx, my); msum = _mm512_add_ps (msum, _mm512_mul_ps (mdiff, mdiff)); d -= 16; } __m256 msum2 = _mm512_extractf32x8_ps (msum, 1); msum2 = _mm256_add_ps (msum2, _mm512_extractf32x8_ps (msum, 0)); return vec_L2Sqr_fp_256b_T (x, y, d, msum2); } inline float vec_L2Sqr_512b_T (const float* x, const float* y, size_t d) { return vec_L2Sqr_fp_512b_T (x, y, d); } inline float vec_L2Sqr_512b_T (const bfp16_t* x, const bfp16_t* y, size_t d) { return vec_L2Sqr_fp_512b_T (x, y, d); } #endif #if defined (USE_SIMD_512) template <typename T> inline float vec_IP_T (const T* x, const T* y, size_t d) { return vec_IP_512b_T (x, y, d); } template <typename T> inline float vec_L2Sqr_T (const T* x, const T* y, size_t d) { return vec_L2Sqr_512b_T (x, y, d); } #elif defined (USE_SIMD_256) template <typename T> inline float vec_IP_T (const T* x, const T* y, size_t d) { return vec_IP_256b_T (x, y, d); } template <typename T> inline float vec_L2Sqr_T (const T* x, const T* y, size_t d) { return vec_L2Sqr_256b_T (x, y, d); } #elif defined (USE_SIMD_128) template <typename T> inline float vec_IP_T (const T* x, const T* y, size_t d) { return vec_IP_128b_T (x, y, d); } template <typename T> inline float vec_L2Sqr_T (const T* x, const T* y, size_t d) { return vec_L2Sqr_128b_T (x, y, d); } #else template <typename T> inline float vec_IP_T (const T* x, const T* y, size_t d) { return vec_IP_ref_T (x, y, d); } template <typename T> inline float vec_L2Sqr_T (const T* x, const T* y, size_t d) { return vec_L2Sqr_ref_T (x, y, d); } #endif } #endif #ifdef OPT_FLAT_DTYPE #define FLAT_BATCH_THRESHOLD 4 namespace faiss { //=================================KNN Routine================================ template <typename T, typename D> void knn_less_better_alone_T (const T* x, const T* y, size_t d, size_t nx, size_t ny, float_maxheap_array_t* res, D& distance) { size_t k = res->k; size_t check_period = InterruptCallback::get_period_hint (ny * d); check_period *= omp_get_max_threads (); for (size_t i0 = 0; i0 < nx; i0 += check_period) { size_t i1 = std::min (i0 + check_period, nx); #pragma omp parallel for for (size_t i = i0; i < i1; i++) { const T* x_i = x + i * d; const T* y_j = y; float* simi = res->get_val (i); int64_t* idxi = res->get_ids (i); maxheap_heapify (k, simi, idxi); for (size_t j = 0; j < ny; j++) { float dis = distance (i, j, x_i, y_j, d); if (dis < simi[0]) { maxheap_pop (k, simi, idxi); maxheap_push (k, simi, idxi, dis, j); } y_j += d; } maxheap_reorder (k, simi, idxi); } InterruptCallback::check (); } } template <typename T, typename D> void knn_greater_better_alone_T (const T* x, const T* y, size_t d, size_t nx, size_t ny, float_minheap_array_t* res, D& distance) { size_t k = res->k; size_t check_period = InterruptCallback::get_period_hint (ny * d); check_period *= omp_get_max_threads (); for (size_t i0 = 0; i0 < nx; i0 += check_period) { size_t i1 = std::min (i0 + check_period, nx); #pragma omp parallel for for (size_t i = i0; i < i1; i++) { const T* x_i = x + i * d; const T* y_j = y; float* simi = res->get_val (i); int64_t* idxi = res->get_ids (i); minheap_heapify (k, simi, idxi); for (size_t j = 0; j < ny; j++) { float dis = distance (i, j, x_i, y_j, d); if (dis > simi[0]) { minheap_pop (k, simi, idxi); minheap_push (k, simi, idxi, dis, j); } y_j += d; } minheap_reorder (k, 
simi, idxi); } InterruptCallback::check (); } } template <typename T> inline void knn_inner_product_alone_T (const T* x, const T* y, size_t d, size_t nx, size_t ny, float_minheap_array_t* res) { struct IP { inline float operator () (size_t /*ix*/, size_t /*jy*/, const T* xi, const T* yj, size_t d) const { return vec_IP_T (xi, yj, d); } } distance; knn_greater_better_alone_T (x, y, d, nx, ny, res, distance); } template <typename T> inline void knn_L2Sqr_alone_T (const T* x, const T* y, size_t d, size_t nx, size_t ny, float_maxheap_array_t* res) { struct L2Sqr { inline float operator () (size_t /*ix*/, size_t /*jy*/, const T* xi, const T* yj, size_t d) const { return vec_L2Sqr_T (xi, yj, d); } } distance; knn_less_better_alone_T (x, y, d, nx, ny, res, distance); } template <typename T> inline void knn_L2Sqr_expand_alone_T (const T* x, const T* y, size_t d, size_t nx, size_t ny, float_maxheap_array_t* res, const float* y_norm) { struct L2SqrExpand { const float* y_norm_sqr; inline float operator () (size_t /*ix*/, size_t jy, const T* xi, const T* yj, size_t d) const { return y_norm_sqr[jy] - 2 * vec_IP_T (xi, yj, d); } } distance = { .y_norm_sqr = y_norm, }; knn_less_better_alone_T (x, y, d, nx, ny, res, distance); } template <typename T> inline void knn_inner_product_batch_T (const T* x, const T* y, size_t d, size_t nx, size_t ny, float_minheap_array_t* res) { knn_inner_product_alone_T (x, y, d, nx, ny, res); } template <typename T> inline void knn_L2Sqr_batch_T (const T* x, const T* y, size_t d, size_t nx, size_t ny, float_maxheap_array_t* res) { knn_L2Sqr_alone_T (x, y, d, nx, ny, res); } template <typename T> inline void knn_L2Sqr_expand_batch_T (const T* x, const T* y, size_t d, size_t nx, size_t ny, float_maxheap_array_t* res, const float* y_norm) { knn_L2Sqr_expand_alone_T (x, y, d, nx, ny, res, y_norm); } template <typename H, typename D> void knn_batch_T (const float* x, const float* y, size_t d, size_t nx, size_t ny, H* heap, D& distance) { heap->heapify (); if (nx == 0 || ny == 0) { return; } float* distances = new float [nx * ny]; distance (x, y, d, nx, ny, distances); heap->addn (ny, distances, 0, 0, nx); delete[] distances; InterruptCallback::check (); heap->reorder (); } inline void knn_inner_product_batch_T (const float* x, const float* y, size_t d, size_t nx, size_t ny, float_minheap_array_t* res) { struct IP { inline void operator () (const float* x, const float* y, size_t d, size_t nx, size_t ny, float* distances) const { cblas_sgemm (CblasRowMajor, CblasNoTrans, CblasTrans, nx, ny, d, 1.0f, x, d, y, d, 0.0f, distances, ny); } } distance; knn_batch_T (x, y, d, nx, ny, res, distance); } inline void knn_L2Sqr_expand_batch_T (const float* x, const float* y, size_t d, size_t nx, size_t ny, float_maxheap_array_t* res, const float* y_norm) { struct L2SqrExpand { const float* y_norm; inline void operator () (const float* x, const float* y, size_t d, size_t nx, size_t ny, float* distances) const { float* distances_i = distances; size_t step = ny * sizeof(float); for (size_t i = 0; i < nx; i++) { memcpy (distances_i, y_norm, step); distances_i += ny; } cblas_sgemm (CblasRowMajor, CblasNoTrans, CblasTrans, nx, ny, d, -2.0f, x, d, y, d, 1.0f, distances, ny); } } distance = { .y_norm = y_norm, }; knn_batch_T (x, y, d, nx, ny, res, distance); } template <typename T> inline void knn_inner_product_T (const T* x, const T* y, size_t d, size_t nx, size_t ny, float_minheap_array_t* res) { if (nx < FLAT_BATCH_THRESHOLD) { knn_inner_product_alone_T (x, y, d, nx, ny, res); } else { 
knn_inner_product_batch_T (x, y, d, nx, ny, res); } } template <typename T> inline void knn_L2Sqr_T (const T* x, const T* y, size_t d, size_t nx, size_t ny, float_maxheap_array_t* res) { if (nx < FLAT_BATCH_THRESHOLD) { knn_L2Sqr_alone_T (x, y, d, nx, ny, res); } else { knn_L2Sqr_batch_T (x, y, d, nx, ny, res); } } template <typename T> inline void knn_L2Sqr_expand_T (const T* x, const T* y, size_t d, size_t nx, size_t ny, float_maxheap_array_t* res, const float* y_norm) { if (nx < FLAT_BATCH_THRESHOLD) { knn_L2Sqr_expand_alone_T (x, y, d, nx, ny, res, y_norm); } else { knn_L2Sqr_expand_batch_T (x, y, d, nx, ny, res, y_norm); } } } #endif #ifdef OPT_IVFFLAT_DTYPE #define SCANNER_USE_BATCH false namespace faiss { //===========================Inverted List Scanner============================ template <typename T> class InvertedListScanner_T : public InvertedListScanner { using idx_t = InvertedListScanner::idx_t; protected: size_t d; size_t code_size; bool store_pairs; const T* converted_x; idx_t list_no; public: InvertedListScanner_T (size_t d, bool store_pairs): d (d), code_size (sizeof(T) * d), store_pairs (store_pairs), converted_x(nullptr), list_no(-1) { } virtual ~InvertedListScanner_T () { if (converted_x) { del_converted_x_T (d, converted_x); } } virtual void set_query (const float* query) override { if (converted_x) { del_converted_x_T (d, converted_x); } converted_x = convert_x_T<T> (d, query); } virtual void set_list (idx_t lidx, float) override { list_no = lidx; } virtual float distance_to_code (const uint8_t*) const override { FAISS_THROW_MSG ("not implemented"); } virtual size_t scan_codes (size_t list_size, const uint8_t* codes, const idx_t* ids, float* simi, idx_t* idxi, size_t k) const = 0; }; template <typename T, typename C, typename D> class AloneInvertedListScanner_T : public InvertedListScanner_T<T> { using idx_t = InvertedListScanner::idx_t; using Scanner = InvertedListScanner_T<T>; private: D* distance; public: AloneInvertedListScanner_T (size_t d, bool store_pairs, D* distance): Scanner (d, store_pairs), distance (distance) { } virtual ~AloneInvertedListScanner_T () { delete distance; } virtual size_t scan_codes (size_t list_size, const uint8_t* codes, const idx_t* ids, float* simi, idx_t* idxi, size_t k) const override { size_t nup = 0; for (size_t i = 0; i < list_size; i++) { float dis = (*distance) (Scanner::list_no, i,Scanner::converted_x, (const T*)codes, Scanner::d); codes += Scanner::code_size; if (C::cmp (simi[0], dis)) { heap_pop<C> (k, simi, idxi); int64_t id = Scanner::store_pairs ? 
lo_build (Scanner::list_no, i) : ids[i]; heap_push<C> (k, simi, idxi, dis, id); nup++; } } return nup; } }; template <typename T> InvertedListScanner* get_IP_alone_scanner_T (size_t d, bool store_pairs) { struct IP { inline float operator () (size_t /*ilist*/, size_t /*jy*/, const T* x, const T* yj, size_t d) const { return vec_IP_T (x, yj, d); } } *distance = new IP; return new AloneInvertedListScanner_T<T, CMin<float, int64_t>, IP> (d, store_pairs, distance); } template <typename T> InvertedListScanner* get_L2Sqr_alone_scanner_T (size_t d, bool store_pairs) { struct L2Sqr { inline float operator () (size_t /*ilist*/, size_t /*jy*/, const T* x, const T* yj, size_t d) const { return vec_L2Sqr_T (x, yj, d); } } *distance = new L2Sqr; return new AloneInvertedListScanner_T<T, CMax<float, int64_t>, L2Sqr> (d, store_pairs, distance); } template <typename T, typename TNorm> InvertedListScanner* get_L2Sqr_expand_alone_scanner_T (size_t d, bool store_pairs, const TNorm y_norm) { struct L2SqrExpand { const TNorm y_norm; inline float operator () (size_t ilist, size_t jy, const T* x, const T* yj, size_t d) { return y_norm [ilist] [jy] - 2 * vec_IP_T (x, yj, d); } } *distance = new L2SqrExpand { .y_norm = y_norm, }; return new AloneInvertedListScanner_T<T, CMax<float, int64_t>, L2SqrExpand> (d, store_pairs, distance); } template <typename T, typename C, typename D> class BatchInvertedListScanner_T : public InvertedListScanner_T<T> { using idx_t = InvertedListScanner::idx_t; using Scanner = InvertedListScanner_T<T>; private: D* distance; public: BatchInvertedListScanner_T (size_t d, bool store_pairs, D* distance): Scanner (d, store_pairs), distance (distance) { } virtual ~BatchInvertedListScanner_T () { delete distance; } virtual size_t scan_codes (size_t list_size, const uint8_t* codes, const idx_t* ids, float* simi, idx_t* idxi, size_t k) const override { float* distances = new float [list_size]; (*distance) (Scanner::converted_x, Scanner::list_no, list_size, (const T*)codes, Scanner::d, distances); size_t nup = 0; for (size_t i = 0; i < list_size; i++) { float dis = distances [i]; if (C::cmp (simi[0], dis)) { heap_pop<C> (k, simi, idxi); int64_t id = Scanner::store_pairs ? 
lo_build (Scanner::list_no, i) : ids[i]; heap_push<C> (k, simi, idxi, dis, id); nup++; } } delete[] distances; return nup; } }; template <typename T> inline InvertedListScanner* get_IP_batch_scanner_T (size_t d, bool store_pairs, T*) { return get_IP_alone_scanner_T<T> (d, store_pairs); } inline InvertedListScanner* get_IP_batch_scanner_T (size_t d, bool store_pairs, float*) { struct IP { inline void operator () (const float* x, size_t /*ilist*/, size_t list_size, const float* y, size_t d, float* distances) const { cblas_sgemv (CblasRowMajor, CblasNoTrans, list_size, d, 1.0f, y, d, x, 1, 0.0f, distances, 1); } } *distance = new IP; return new BatchInvertedListScanner_T<float, CMin<float, int64_t>, IP> (d, store_pairs, distance); } template <typename T> inline InvertedListScanner* get_L2Sqr_batch_scanner_T (size_t d, bool store_pairs, T*) { return get_L2Sqr_alone_scanner_T<T> (d, store_pairs); } template <typename T, typename TNorm> inline InvertedListScanner* get_L2Sqr_expand_batch_scanner_T (size_t d, bool store_pairs, const TNorm y_norm, T*) { return get_L2Sqr_expand_alone_scanner_T<T> (d, store_pairs, y_norm); } template <typename TNorm> inline InvertedListScanner* get_L2Sqr_expand_batch_scanner_T (size_t d, bool store_pairs, const TNorm y_norm, float*) { struct L2SqrExpand { const TNorm y_norm; inline void operator () (const float* x, size_t ilist, size_t list_size, const float* y, size_t d, float* distances) const { memcpy (distances, &(y_norm [ilist] [0]), list_size * sizeof(float)); cblas_sgemv (CblasRowMajor, CblasNoTrans, list_size, d, -2.0f, y, d, x, 1, 1.0f, distances, 1); } } *distance = new L2SqrExpand { .y_norm = y_norm, }; return new BatchInvertedListScanner_T<float, CMax<float, int64_t>, L2SqrExpand> (d, store_pairs, distance); } template <typename T> inline InvertedListScanner* get_IP_scanner_T (size_t d, bool store_pairs) { if (!SCANNER_USE_BATCH) { return get_IP_alone_scanner_T<T> (d, store_pairs); } else { return get_IP_batch_scanner_T (d, store_pairs, (T*)nullptr); } } template <typename T> inline InvertedListScanner* get_L2Sqr_scanner_T (size_t d, bool store_pairs) { if (!SCANNER_USE_BATCH) { return get_L2Sqr_alone_scanner_T<T> (d, store_pairs); } else { return get_L2Sqr_batch_scanner_T (d, store_pairs, (T*)nullptr); } } template <typename T, typename TNorm> inline InvertedListScanner* get_L2Sqr_expand_scanner_T (size_t d, bool store_pairs, const TNorm y_norm) { if (!SCANNER_USE_BATCH) { return get_L2Sqr_expand_alone_scanner_T<T> (d, store_pairs, y_norm); } else { return get_L2Sqr_expand_batch_scanner_T (d, store_pairs, y_norm, (T*)nullptr); } } } #endif
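/*
 * Usage sketch (illustrative, not part of the header): vec_IP_T and
 * vec_L2Sqr_T resolve at compile time to the widest SIMD kernel enabled
 * (512b, 256b or 128b) and hand any remainder of d down to the scalar
 * reference loop through the accumulator-carrying overloads above. The
 * example function name is hypothetical:
 */
#if 0
namespace faiss {
inline float example_pair_distances (const float* x, const float* y,
                                     size_t d, float* ip_out) {
    *ip_out = vec_IP_T (x, y, d);   // inner product
    return vec_L2Sqr_T (x, y, d);   // squared L2 distance
}
}
#endif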
jac_solv_parfor.c
/* ** PROGRAM: jacobi Solver .. parallel For version ** ** PURPOSE: This program will explore use of a jacobi iterative ** method to solve a system of linear equations (Ax= b). ** ** Here is the basic idea behind the method. Rewrite ** the matrix A as a Lower Triangular (L), upper triangular ** (U) and diagonal matrix (D) ** ** Ax = (L + D + U)x = b ** ** Carry out the multiplication and rearrange: ** ** Dx = b - (L+U)x --> x = (b-(L+U)x)/D ** ** We can do this iteratively ** ** x_new = (b-(L+U)x_old)/D ** ** USAGE: Run without arguments to use default SIZE. ** ** ./jac_solv ** ** Run with a single argument for the order of the A ** matrix ... for example ** ** ./jac_solv 2500 ** ** HISTORY: Written by Tim Mattson, Oct 2015 */ #include<omp.h> #include<math.h> #include<stdio.h> #include<stdlib.h> #include "mm_utils.h" //a library of basic matrix utility functions //and some key constants used in this program //(such as TYPE) #define TOLERANCE 0.001 #define DEF_SIZE 1000 #define MAX_ITERS 5000 #define LARGE 1000000.0 //#define DEBUG 1 // output a small subset of intermediate values //#define VERBOSE 1 int main(int argc, char **argv) { int Ndim; // A[Ndim][Ndim] int i,j, iters; double start_time, elapsed_time; TYPE conv, tmp, err, chksum; TYPE *A, *b, *x1, *x2, *xnew, *xold, *xtmp; // set matrix dimensions and allocate memory for matrices if(argc ==2){ Ndim = atoi(argv[1]); } else{ Ndim = DEF_SIZE; } printf(" jacobi solver parallel for version: ndim = %d\n",Ndim); A = (TYPE *) malloc(Ndim*Ndim*sizeof(TYPE)); b = (TYPE *) malloc(Ndim*sizeof(TYPE)); x1 = (TYPE *) malloc(Ndim*sizeof(TYPE)); x2 = (TYPE *) malloc(Ndim*sizeof(TYPE)); if (!A || !b || !x1 || !x2) { printf("\n memory allocation error\n"); exit(-1); } // generate our diagonally dominant matrix, A init_diag_dom_near_identity_matrix(Ndim, A); #ifdef VERBOSE mm_print(Ndim, Ndim, A); #endif // // Initialize x and just give b some non-zero random values // for(i=0; i<Ndim; i++){ x1[i] = (TYPE)0.0; x2[i] = (TYPE)0.0; b[i] = (TYPE)(rand()%51)/100.0; } start_time = omp_get_wtime(); // // jacobi iterative solver // conv = LARGE; iters = 0; xnew = x1; xold = x2; { // note: i am comparing against the convergence squared. This saves a // sqrt and an extra barrier. while((conv > TOLERANCE*TOLERANCE) && (iters<MAX_ITERS)) { { iters++; conv = 0.0; xtmp = xnew; // don't copy arrays. xnew = xold; // just swap pointers. xold = xtmp; } #pragma omp parallel for private(i,j) for (i=0; i<Ndim; i++){ xnew[i] = (TYPE) 0.0; for (j=0; j<Ndim;j++){ // if(i!=j) // xnew[i]+= A[i*Ndim + j]*xold[j]; xnew[i]+= A[i*Ndim + j]*xold[j] * (i != j); } xnew[i] = (b[i]-xnew[i])/A[i*Ndim+i]; } // // test convergence // #pragma omp parallel for private(tmp) reduction(+:conv) for (i=0; i<Ndim; i++){ tmp = xnew[i]-xold[i]; conv += tmp*tmp; } #ifdef DEBUG printf(" conv = %f \n",(float)conv); #endif } } conv = sqrt((double)conv); elapsed_time = omp_get_wtime() - start_time; printf(" Convergence = %g with %d iterations and %f seconds\n", (float)conv, iters, (float)elapsed_time); // // test answer by multiplying my computed value of x by // the input A matrix and comparing the result with the // input b vector. 
// err = (TYPE) 0.0; chksum = (TYPE) 0.0; for(i=0;i<Ndim;i++){ xold[i] = (TYPE) 0.0; for(j=0; j<Ndim; j++) xold[i] += A[i*Ndim+j]*xnew[j]; tmp = xold[i] - b[i]; #ifdef DEBUG printf(" i=%d, diff = %f, computed b = %f, input b= %f \n", i, (float)tmp, (float)xold[i], (float)b[i]); #endif chksum += xnew[i]; err += tmp*tmp; } err = sqrt((double)err); printf("jacobi solver: err = %f, solution checksum = %f \n", (float)err, (float)chksum); free(A); free(b); free(x1); free(x2); }
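/*
 * Worked example (illustrative, not part of the solver): one update of
 *    x_new[i] = (b[i] - sum_{j!=i} A[i][j]*x_old[j]) / A[i][i]
 * on the diagonally dominant 2x2 system A = [[4,1],[2,5]], b = [9,12],
 * starting from x_old = [0,0]:
 *    x_new[0] = (9  - 1*0) / 4 = 2.25
 *    x_new[1] = (12 - 2*0) / 5 = 2.40
 * and a second sweep gives
 *    x_new[0] = (9  - 1*2.40) / 4 = 1.65
 *    x_new[1] = (12 - 2*2.25) / 5 = 1.50
 * converging toward the exact solution x = [33/18, 30/18] ~ [1.833, 1.667].
 */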
calculate_discontinuous_distance_to_skin_process.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Pooyan Dadvand // Ruben Zorrilla // // Collaborators: Franziska Wahl // #if !defined(KRATOS_CALCULATE_DISCONTINUOUS_DISTANCE_TO_SKIN_PROCESS_H_INCLUDED ) #define KRATOS_CALCULATE_DISCONTINUOUS_DISTANCE_TO_SKIN_PROCESS_H_INCLUDED // System includes #include <string> #include <iostream> // External includes // Project includes #include "geometries/plane_3d.h" #include "includes/checks.h" #include "processes/process.h" #include "processes/find_intersected_geometrical_objects_process.h" #include "utilities/variable_utils.h" #include "utilities/pointer_communicator.h" namespace Kratos { ///@addtogroup Kratos Core ///@{ ///@name Kratos Classes ///@{ class KRATOS_API(KRATOS_CORE) CalculateDiscontinuousDistanceToSkinProcessFlags { public: KRATOS_DEFINE_LOCAL_FLAG(CALCULATE_ELEMENTAL_EDGE_DISTANCES); /// Local flag to switch on/off the elemental edge distances storage KRATOS_DEFINE_LOCAL_FLAG(CALCULATE_ELEMENTAL_EDGE_DISTANCES_EXTRAPOLATED); /// Local flag to switch on/off the extrapolated elemental edge distances storage KRATOS_DEFINE_LOCAL_FLAG(USE_POSITIVE_EPSILON_FOR_ZERO_VALUES); /// Local flag to switch from positive (true) to negative (false) epsilon when replacing zero distance values. }; /// This only calculates the distance. Calculating the inside/outside state should be done by a derived class of this. /** This process takes a volume model part (with tetrahedra mesh) and a skin model part (with triangle mesh) and calculates the distance to the skin for all the elements and nodes of the volume model part. */ template<std::size_t TDim = 3> class KRATOS_API(KRATOS_CORE) CalculateDiscontinuousDistanceToSkinProcess : public Process { public: ///@name Type Definitions ///@{ /// Pointer definition of CalculateDiscontinuousDistanceToSkinProcess KRATOS_CLASS_POINTER_DEFINITION(CalculateDiscontinuousDistanceToSkinProcess); ///@} ///@name Life Cycle ///@{ /// Constructor to be used. CalculateDiscontinuousDistanceToSkinProcess( ModelPart& rVolumePart, ModelPart& rSkinPart); /// Constructor with option CalculateDiscontinuousDistanceToSkinProcess( ModelPart& rVolumePart, ModelPart& rSkinPart, const Flags rOptions); /// Destructor. ~CalculateDiscontinuousDistanceToSkinProcess() override; ///@} ///@name Deleted ///@{ /// Default constructor. CalculateDiscontinuousDistanceToSkinProcess() = delete; /// Copy constructor. CalculateDiscontinuousDistanceToSkinProcess(CalculateDiscontinuousDistanceToSkinProcess const& rOther) = delete; /// Assignment operator. CalculateDiscontinuousDistanceToSkinProcess& operator=(CalculateDiscontinuousDistanceToSkinProcess const& rOther) = delete; FindIntersectedGeometricalObjectsProcess mFindIntersectedObjectsProcess; ///@} ///@name Operations ///@{ /** * @brief Initializes discontinuous distance computation process * This method initializes the TO_SPLIT flag, the DISTANCE and * ELEMENTAL_DISTANCES variables as well as the EMBEDDED_VELOCITY */ virtual void Initialize(); /** * @brief Calls the FindIntersectedObjectsProcess to find the intersections * This method calls the FindIntersectedObjectsProcess FindIntersections method. 
*/ virtual void FindIntersections(); /** * @brief Get the array containing the intersecting objects * This method returns an array containing pointers to the intersecting geometries * @return std::vector<PointerVector<GeometricalObject>>& */ virtual std::vector<PointerVector<GeometricalObject>>& GetIntersections(); /** * @brief Computes the elemental distance values * Given an intersecting objects vector, this method computes the elemental distance field * @param rIntersectedObjects array containing pointers to the intersecting geometries */ virtual void CalculateDistances(std::vector<PointerVector<GeometricalObject>>& rIntersectedObjects); /** * @brief Calls the FindIntersectedObjects Clear() method * This method calls the FindIntersectedObjects Clear() to empty the intersecting objects geometries array */ void Clear() override; /** * @brief Executes the CalculateDiscontinuousDistanceToSkinProcess * This method automatically does all the calls required to compute the discontinuous distance function. */ void Execute() override; /** * @brief Calculate embedded variable from skin double specialization * This method calls the specialization method for two double variables * @param rVariable origin double variable in the skin mesh * @param rEmbeddedVariable elemental double variable in the volume mesh to be computed */ void CalculateEmbeddedVariableFromSkin( const Variable<double> &rVariable, const Variable<double> &rEmbeddedVariable); /** * @brief Calculate embedded variable from skin array specialization * This method calls the specialization method for two array variables * @param rVariable origin array variable in the skin mesh * @param rEmbeddedVariable elemental array variable in the volume mesh to be computed */ void CalculateEmbeddedVariableFromSkin( const Variable<array_1d<double,3>> &rVariable, const Variable<array_1d<double,3>> &rEmbeddedVariable); ///@} ///@name Access ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override; /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override; /// Print object's data. void PrintData(std::ostream& rOStream) const override; ///@} protected: ///@name Protected Operations ///@{ /** * @brief Set the Intersection Plane object * This method returns the plane that defines the element intersection. The 2D * case is considered to be a simplification of the 3D one, so a "fake" extra * point is created by extruding the first point in the z-direction. * @param rIntPtsVector array containing the intersecting points coordinates * @return Plane3D the plane defined by the given intersecting points coordinates */ Plane3D SetIntersectionPlane(const std::vector<array_1d<double,3>> &rIntPtsVector); /** * @brief Calculates the domain characteristic length * This method computes the domain characteristic length as the norm of * the diagonal vector that joins the maximum and minimum coordinates * @return double the calculated characteristic length */ double CalculateCharacteristicLength(); ///@} private: ///@name Member Variables ///@{ ModelPart& mrSkinPart; ModelPart& mrVolumePart; Flags mOptions; static const std::size_t mNumNodes = TDim + 1; static const std::size_t mNumEdges = (TDim == 2) ? 
3 : 6; const double mZeroToleranceMultiplier = 1e3; bool mDetectedZeroDistanceValues = false; bool mAreNeighboursComputed = false; ///@} ///@name Private Operations ///@{ /** * @brief Computes the discontinuous distance in one element * This method computes the discontinuous distance field for a given element * @param rElement1 reference to the element of interest * @param rIntersectedObjects reference to the array containing the element of interest intersecting geometries */ void CalculateElementalDistances( Element& rElement1, PointerVector<GeometricalObject>& rIntersectedObjects); /** * @brief Computes the discontinuous edge-based distance in one element * This method computes the discontinuous edge-based distance field for a given element * @param rElement1 reference to the element of interest * @param rIntersectedObjects reference to the array containing the element of interest intersecting geometries */ void CalculateElementalAndEdgeDistances( Element& rElement1, PointerVector<GeometricalObject>& rIntersectedObjects); /** * @brief Computes the edges intersections in one element * Provided a list of elemental intersecting geometries, this * method computes the edge intersections for a given element * @param rElement1 reference to the element of interest * @param rIntersectedObjects reference to the array containing the element of interest intersecting geometries * @param rEdgesContainer reference to the array containing the edges of the element of interest * @param rCutEdgesRatioVector array that stores the relative positions from node zero of the average intersection points * @param rCutExtraEdgesRatioVector array that stores the relative positions from node zero of the average intersection points of the extrapolated geometry * @param rIntersectionPointsArray array containing the edges intersection points * @return unsigned int number of cut edges */ unsigned int ComputeEdgesIntersections( Element& rElement1, const PointerVector<GeometricalObject>& rIntersectedObjects, const Element::GeometryType::GeometriesArrayType& rEdgesContainer, array_1d<double,mNumEdges> &rCutEdgesRatioVector, array_1d<double,mNumEdges> &rCutExtraEdgesRatioVector, std::vector<array_1d <double,3> > &rIntersectionPointsArray); /** * @brief Computes the intersection of a single edge * This method computes the intersection of a given edge with the candidate * intersecting geometry. This operation is performed according to the working * space dimension using the intersection utilities implemented in intersection_utilities.h * @param rIntObjGeometry candidate intersecting geometry * @param rEdgePoint1 edge origin point * @param rEdgePoint2 edge end point * @param rIntersectionPoint intersection point * @return int type of intersection id (see intersection_utilities.h) */ int ComputeEdgeIntersection( const Element::GeometryType& rIntObjGeometry, const Element::NodeType& rEdgePoint1, const Element::NodeType& rEdgePoint2, Point& rIntersectionPoint); /** * @brief Checks if rIntersectionPoint is already present in the * intersection point list in rIntersectionPointsVector for the tolerance rEdgeTolerance. 
* @param rIntersectionPoint reference to the intersection point * @param rIntersectionPointsVector reference to the list of already computed intersected points * @param rEdgeTolerance tolerance to compare two points and assess if they are equal * @return bool if rIntersectionPoint is present in rIntersectionPointsVector */ bool CheckIfPointIsRepeated( const array_1d<double,3>& rIntersectionPoint, const std::vector<array_1d<double,3>>& rIntersectionPointsVector, const double& rEdgeTolerance); /** * @brief Computes the element intersection unit normal * This method computes the element intersection unit normal vector using the distance function gradient. * @param rGeometry reference to the geometry of the element of interest * @param rElementalDistances array containing the ELEMENTAL_DISTANCES values * @param rNormal obtained unit normal vector */ void ComputeIntersectionNormal( const Element::GeometryType& rGeometry, const Vector& rElementalDistances, array_1d<double,3> &rNormal); /** * @brief Computes the nodal distances to the intersection plane * This method creates a plane from the intersection points and then calculates the nodal distances * to the intersection plane. * In the presence of multiple intersections, it performs a least squares approximation of the intersection plane. * @param rElement Element to calculate the ELEMENTAL_DISTANCES * @param rIntersectedObjects Intersected objects container * @param rIntersectionPointsCoordinates The edges intersection points coordinates */ void ComputeIntersectionPlaneElementalDistances( Element& rElement, const PointerVector<GeometricalObject>& rIntersectedObjects, const std::vector<array_1d<double,3>>& rIntersectionPointsCoordinates); /** * @brief Computes the intersection plane approximation * For complex intersection patterns, this method takes a list containing * all the intersecting points and computes the plane that minimizes the * distance from all these points in a least squares sense. The approximated * plane is defined in terms of an origin point and its normal vector. * @param rElement1 reference to the element of interest * @param rPointsCoord list containing the coordinates of all the intersecting points * @param rPlaneBasePointCoords base point defining the approximated plane * @param rPlaneNormal normal vector defining the approximated plane */ void ComputePlaneApproximation( const Element& rElement1, const std::vector< array_1d<double,3> >& rPointsCoord, array_1d<double,3>& rPlaneBasePointCoords, array_1d<double,3>& rPlaneNormal); /** * @brief Computes the elemental distances from the approximation * plane defined by the set of points in rPointVector. * @param rElement reference to the element of interest * @param rElementalDistances reference to the elemental distances container * @param rPointVector reference to the vector containing the points to define the approximation plane */ void ComputeElementalDistancesFromPlaneApproximation( Element& rElement, Vector& rElementalDistances, const std::vector<array_1d<double,3>>& rPointVector); /** * @brief Checks and replaces the values of the ELEMENTAL_DISTANCES vector that are * zero. The values are replaced by an epsilon (whose sign depends on a flag) * that is a fixed factor from the double precision. Can be deactivated by a flag. 
* @param rElementalDistances array containing the ELEMENTAL_DISTANCES values */ void ReplaceZeroDistances(Vector& rElementalDistances); /** * @brief Checks (and corrects if needed) the intersection normal orientation * This method checks the orientation of the previously computed intersection normal. * To do that, the normal vector to each one of the intersecting geometries is * computed and its direction is compared against the current one. If the negative * votes win, the current normal vector orientation is switched. * @param rGeometry element of interest geometry * @param rIntersectedObjects reference to the array containing the element of interest intersecting geometries * @param rElementalDistances array containing the ELEMENTAL_DISTANCES values */ void CorrectDistanceOrientation( const Element::GeometryType& rGeometry, const PointerVector<GeometricalObject>& rIntersectedObjects, Vector& rElementalDistances); /** * @brief Computes the normal vector to an intersecting object geometry * This method computes the normal vector to an intersecting object geometry. * @param rGeometry reference to the geometry of the intersecting object * @param rIntObjNormal reference to the intersecting object normal vector */ void inline ComputeIntersectionNormalFromGeometry( const Element::GeometryType &rGeometry, array_1d<double,3> &rIntObjNormal); /** * @brief Checks if element is incised and then computes the uncut edges intersections of the element * with an averaged and extrapolated geometry. Therefore it calls 'ComputeExtrapolatedGeometryIntersections'. * Note: for uncut or completely cut elements no ratios of the extrapolated geometry will be calculated. * @param rElement reference to the element of interest * @param rEdgesContainer reference to the array containing the edges of the element of interest * @param rNumCutEdges number of cut edges of the element (by the non-extrapolated geometry) * @param rCutEdgesRatioVector array that stores the relative positions from node zero of the average intersection points * @param rExtraGeomNormal array as normal vector of the averaged and extrapolated geometry * @param rCutExtraEdgesRatioVector array that stores the relative positions from node zero of the additional * average intersection points of the extrapolated geometry */ void ComputeExtrapolatedEdgesIntersectionsIfIncised( const Element& rElement, const Element::GeometryType::GeometriesArrayType& rEdgesContainer, unsigned int &rNumCutEdges, array_1d<double,mNumEdges>& rCutEdgesRatioVector, array_1d<double,3> &rExtraGeomNormal, array_1d<double,mNumEdges>& rCutExtraEdgesRatioVector); /** * @brief Computes the uncut edges intersections of one element with an averaged and extrapolated geometry. * Therefore it calls 'IntersectionUtilities'. * It saves the edge intersections as ratios of the edge's length in rCutExtraEdgesRatioVector. 
* @param rElement reference to the element of interest * @param rEdgesContainer reference to the array containing the edges of the element of interest * @param rNumCutEdges number of cut edges of the element * @param rCutEdgesRatioVector array that stores the relative positions from node zero of the average intersection points * @param rExtraGeomNormal normal of the averaged and extrapolated geometry * @param rCutExtraEdgesRatioVector array that stores the relative positions from node zero of the additional * average intersection points of the extrapolated geometry */ void ComputeExtrapolatedGeometryIntersections( const Element& rElement, const Element::GeometryType::GeometriesArrayType& rEdgesContainer, unsigned int& rNumCutEdges, array_1d<double,mNumEdges>& rCutEdgesRatioVector, array_1d<double,3>& rExtraGeomNormal, array_1d<double,mNumEdges>& rCutExtraEdgesRatioVector); /** * @brief Converts edge ratios and edge ratios of the extrapolated geometry to elemental (node) distances * @param rElement reference to the element of interest * @param rIntersectedObjects reference to the array containing the element of interest intersecting geometries * @param rEdgesContainer reference to the array containing the edges of the element of interest * @param rCutEdgesRatioVector array that stores the relative positions from node zero of the average intersection points * (ELEMENTAL_EDGE_DISTANCES) * @param rCutExtraEdgesRatioVector array that stores the relative positions from node zero of the additional * average intersection points of the extrapolated geometry (ELEMENTAL_EXTRA_EDGE_DISTANCES) */ void ComputeElementalDistancesFromEdgeRatios( Element& rElement, const PointerVector<GeometricalObject>& rIntersectedObjects, const Element::GeometryType::GeometriesArrayType& rEdgesContainer, const array_1d<double,mNumEdges> &rCutEdgesRatioVector, const array_1d<double,mNumEdges> &rCutExtraEdgesRatioVector); /** * @brief Computes the intersection points from the intersection ratios of the edges of the element of interest * @param rGeometry reference to geometry of the element of interest * @param rEdgesContainer reference to the array containing the edges of the element of interest * @param rEdgeRatiosVector array containing the intersection ratios of an element's edges * @param rIntersectionPointsVector vector containing the intersection point arrays */ void ConvertRatiosToIntersectionPoints( const Element::GeometryType& rGeometry, const Element::GeometryType::GeometriesArrayType& rEdgesContainer, const array_1d<double,mNumEdges> &rEdgeRatiosVector, std::vector<array_1d <double,3> > &rIntersectionPointsVector); /** * @brief Checks whether the edges of an element, which are cut, all share one node * @param rEdge reference to the edge of interest * @param rIntersectionPoint average intersection point at the edge * @return calculated relative positions of the intersection point along the edge from node zero */ double ConvertIntersectionPointToEdgeRatio( const Geometry<Node<3> >& rEdge, const array_1d<double,3>& rIntersectionPoint); /** * @brief Checks whether the edges of an element, which are cut, all share one node * @param rEdge reference to the edge of interest * @param rEdgeRatio relative positions of the intersection point along the edge from node zero * @return rIntersectionPoint calculated average intersection point at the edge */ array_1d<double,3> ConvertEdgeRatioToIntersectionPoint( const Geometry<Node<3> >& rEdge, const double& rEdgeRatio); /** * @brief Checks whether the edges of an element, which 
are cut, all share one node * @param rElement reference to the element of interest * @param rEdgesContainer reference to the array containing the edges of the element of interest * @param rCutEdgesRatioVector array that stores the relative positions from node zero of the average intersection points * @return boolean true if cut edges share one node */ bool CheckIfCutEdgesShareNode( const Element& rElement, const Element::GeometryType::GeometriesArrayType& rEdgesContainer, const array_1d<double,mNumEdges>& rCutEdgesRatioVector) const; /** * @brief Computes the value of any embedded variable * For a given array variable in the skin mesh, this method calculates the value * of such variable in the embedded mesh. This is done in each element of the volume * mesh by computing the average value of all the edges intersections. This value * is averaged again according to the number of intersected edges. * @tparam TVarType variable type * @param rVariable origin variable in the skin mesh * @param rEmbeddedVariable elemental variable in the volume mesh to be computed */ template<class TVarType> void CalculateEmbeddedVariableFromSkinSpecialization( const Variable<TVarType> &rVariable, const Variable<TVarType> &rEmbeddedVariable) { const auto &r_int_obj_vect= this->GetIntersections(); const int n_elems = mrVolumePart.NumberOfElements(); KRATOS_ERROR_IF((mrSkinPart.NodesBegin())->SolutionStepsDataHas(rVariable) == false) << "Skin model part solution step data missing variable: " << rVariable << std::endl; // Initialize embedded variable value VariableUtils().SetNonHistoricalVariableToZero(rEmbeddedVariable, mrVolumePart.Elements()); // Compute the embedded variable value for each element #pragma omp parallel for schedule(dynamic) for (int i_elem = 0; i_elem < n_elems; ++i_elem) { // Check if the current element has intersecting entities if (r_int_obj_vect[i_elem].size() != 0) { // Initialize the element values unsigned int n_int_edges = 0; auto it_elem = mrVolumePart.ElementsBegin() + i_elem; auto &r_geom = it_elem->GetGeometry(); const auto edges = r_geom.GenerateEdges(); // Loop the element of interest edges for (unsigned int i_edge = 0; i_edge < r_geom.EdgesNumber(); ++i_edge) { // Initialize edge values unsigned int n_int_obj = 0; TVarType i_edge_val = rEmbeddedVariable.Zero(); // Check the edge intersection against all the candidates for (auto &r_int_obj : r_int_obj_vect[i_elem]) { Point intersection_point; const int is_intersected = this->ComputeEdgeIntersection( r_int_obj.GetGeometry(), edges[i_edge][0], edges[i_edge][1], intersection_point); // Compute the variable value in the intersection point if (is_intersected == 1) { n_int_obj++; array_1d<double,3> local_coords; r_int_obj.GetGeometry().PointLocalCoordinates(local_coords, intersection_point); Vector int_obj_N; r_int_obj.GetGeometry().ShapeFunctionsValues(int_obj_N, local_coords); for (unsigned int i_node = 0; i_node < r_int_obj.GetGeometry().PointsNumber(); ++i_node) { i_edge_val += r_int_obj.GetGeometry()[i_node].FastGetSolutionStepValue(rVariable) * int_obj_N[i_node]; } } } // Check if the edge is intersected if (n_int_obj != 0) { // Update the element intersected edges counter n_int_edges++; // Add the average edge value (there might exist cases in where // more than one geometry intersects the edge of interest). 
it_elem->GetValue(rEmbeddedVariable) += i_edge_val / n_int_obj; } } // Average between all the intersected edges if (n_int_edges != 0) { it_elem->GetValue(rEmbeddedVariable) /= n_int_edges; } } } }; /** * @brief Set the TO_SPLIT Kratos flag * This function sets the TO_SPLIT flag in the provided element according to the ELEMENTAL_DISTANCES values * Note that the zero distance case is avoided by checking the positiveness and negativeness of the nodal values * @param rElement Element to set the TO_SPLIT flag * @param ZeroTolerance Tolerance to check the zero distance values */ void SetToSplitFlag( Element& rElement, const double ZeroTolerance); /** * @brief Checks the elemental edges distances if zero values of the distance * are detected. This ensures that the elementes detected as incised and intersected * are consistent with the zero-correction applied by the process. */ void CheckAndCorrectEdgeDistances(); /** * @brief Creates the global pointer communicator that contains all neighbours elements. In MPI, this * allows to get information from neighbours elements that are not in the same partition. */ GlobalPointerCommunicator<Element>::Pointer CreatePointerCommunicator(); ///@} }; // Class CalculateDiscontinuousDistanceToSkinProcess ///@} ///@name Input and output ///@{ /// input stream function inline std::istream& operator >> ( std::istream& rIStream, CalculateDiscontinuousDistanceToSkinProcess<>& rThis); /// output stream function inline std::ostream& operator << ( std::ostream& rOStream, const CalculateDiscontinuousDistanceToSkinProcess<>& rThis) { rThis.PrintInfo(rOStream); rOStream << std::endl; rThis.PrintData(rOStream); return rOStream; } ///@} ///@} addtogroup block } // namespace Kratos. #endif // KRATOS_CALCULATE_DISCONTINUOUS_DISTANCE_TO_SKIN_PROCESS_H_INCLUDED defined
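/* Illustrative sketch (not part of the Kratos source above): the geometric
   core behind ComputeIntersectionPlaneElementalDistances is a signed
   point-plane distance. Given three intersection points, the plane normal
   comes from a cross product and d(x) = n . (x - p0) / |n| gives each nodal
   distance. All names below are hypothetical. */
#include <math.h>

static void cross3(const double a[3], const double b[3], double r[3])
{
    r[0] = a[1] * b[2] - a[2] * b[1];
    r[1] = a[2] * b[0] - a[0] * b[2];
    r[2] = a[0] * b[1] - a[1] * b[0];
}

/* Signed distance from point x to the plane through p0, p1 and p2. */
static double point_plane_distance(const double p0[3], const double p1[3],
                                   const double p2[3], const double x[3])
{
    double u[3], v[3], n[3];
    for (int i = 0; i < 3; ++i) {
        u[i] = p1[i] - p0[i];
        v[i] = p2[i] - p0[i];
    }
    cross3(u, v, n);
    const double norm = sqrt(n[0] * n[0] + n[1] * n[1] + n[2] * n[2]);
    const double dot = n[0] * (x[0] - p0[0])
                     + n[1] * (x[1] - p0[1])
                     + n[2] * (x[2] - p0[2]);
    return dot / norm; /* sign follows the plane orientation */
}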
zgemm.c
#include "blas.h" #include "error.h" #include <stdio.h> #include "handle.h" #include "config.h" #include "zgemm.fatbin.c" static inline size_t min(size_t a, size_t b) { return (a < b) ? a : b; } static inline size_t max(size_t a, size_t b) { return (a > b) ? a : b; } static inline CUresult cuMemcpyHtoD2DAsync(CUdeviceptr A, size_t lda, size_t ai, size_t aj, const void * B, size_t ldb, size_t bi, size_t bj, size_t m, size_t n, size_t elemSize, CUstream stream) { CUDA_MEMCPY2D copy = { bi * elemSize, bj, CU_MEMORYTYPE_HOST, B, 0, 0, ldb * elemSize, ai * elemSize, aj, CU_MEMORYTYPE_DEVICE, NULL, A, 0, lda * elemSize, m * elemSize, n }; return cuMemcpy2DAsync(&copy, stream); } static inline CUresult cuMemcpyDtoH2DAsync(void * A, size_t lda, size_t ai, size_t aj, CUdeviceptr B, size_t ldb, size_t bi, size_t bj, size_t m, size_t n, size_t elemSize, CUstream stream) { CUDA_MEMCPY2D copy = { bi * elemSize, bj, CU_MEMORYTYPE_DEVICE, NULL, B, 0, ldb * elemSize, ai * elemSize, aj, CU_MEMORYTYPE_HOST, A, 0, 0, lda * elemSize, m * elemSize, n }; return cuMemcpy2DAsync(&copy, stream); } static const double complex zero = 0.0 + 0.0 * I; static const double complex one = 1.0 + 0.0 * I; void zgemm(CBlasTranspose transA, CBlasTranspose transB, size_t m, size_t n, size_t k, double complex alpha, const double complex * restrict A, size_t lda, const double complex * restrict B, size_t ldb, double complex beta, double complex * restrict C, size_t ldc) { const size_t nRowA = (transA == CBlasNoTrans) ? m : k; const size_t nRowB = (transB == CBlasNoTrans) ? k : n; int info = 0; if (lda < nRowA) info = 8; else if (ldb < nRowB) info = 10; else if (ldc < m) info = 13; if (info != 0) { XERBLA(info); return; } if (m == 0 || n == 0 || ((alpha == zero || k == 0) && beta == one)) return; if (alpha == zero) { if (beta == zero) { #pragma omp parallel for for (size_t j = 0; j < n; j++) { for (size_t i = 0; i < m; i++) C[j * ldc + i] = zero; } } else { #pragma omp parallel for for (size_t j = 0; j < n; j++) { for (size_t i = 0; i < m; i++) C[j * ldc + i] *= beta; } } return; } if (transB == CBlasNoTrans) { if (transA == CBlasNoTrans) { #pragma omp parallel for for (size_t j = 0; j < n; j++) { if (beta == zero) { for (size_t i = 0; i < m; i++) C[j * ldc + i] = zero; } else if (beta != one) { for (size_t i = 0; i < m; i++) C[j * ldc + i] *= beta; } for (size_t l = 0; l < k; l++) { if (B[j * ldb + l] != zero) { register double complex temp = alpha * B[j * ldb + l]; for (size_t i = 0; i < m; i++) C[j * ldc + i] += temp * A[l * lda + i]; } } } } else if (transA == CBlasConjTrans) { #pragma omp parallel for for (size_t j = 0; j < n; j++) { for (size_t i = 0; i < m; i++) { register double complex temp = zero; for (size_t l = 0; l < k; l++) temp += conj(A[i * lda + l]) * B[j * ldb + l]; if (beta == zero) C[j * ldc + i] = alpha * temp; else C[j * ldc + i] = alpha * temp + beta * C[j * ldc + i]; } } } else { #pragma omp parallel for for (size_t j = 0; j < n; j++) { for (size_t i = 0; i < m; i++) { register double complex temp = zero; for (size_t l = 0; l < k; l++) temp += A[i * lda + l] * B[j * ldb + l]; if (beta == zero) C[j * ldc + i] = alpha * temp; else C[j * ldc + i] = alpha * temp + beta * C[j * ldc + i]; } } } } else if (transB == CBlasConjTrans) { if (transA == CBlasNoTrans) { #pragma omp parallel for for (size_t j = 0; j < n; j++) { if (beta == zero) { for (size_t i = 0; i < m; i++) C[j * ldc + i] = zero; } else if (beta != one) { for (size_t i = 0; i < m; i++) C[j * ldc + i] *= beta; } for (size_t l = 0; l < k; l++) { if (B[l 
* ldb + j] != zero) { register double complex temp = alpha * conj(B[l * ldb + j]); for (size_t i = 0; i < m; i++) C[j * ldc + i] += temp * A[l * lda + i]; } } } } else if (transA == CBlasConjTrans) { #pragma omp parallel for for (size_t j = 0; j < n; j++) { for (size_t i = 0; i < m; i++) { register double complex temp = zero; for (size_t l = 0; l < k; l++) temp += conj(A[i * lda + l]) * conj(B[l * ldb + j]); if (beta == zero) C[j * ldc + i] = alpha * temp; else C[j * ldc + i] = alpha * temp + beta * C[j * ldc + i]; } } } else { #pragma omp parallel for for (size_t j = 0; j < n; j++) { for (size_t i = 0; i < m; i++) { register double complex temp = zero; for (size_t l = 0; l < k; l++) temp += A[i * lda + l] * conj(B[l * ldb + j]); if (beta == zero) C[j * ldc + i] = alpha * temp; else C[j * ldc + i] = alpha * temp + beta * C[j * ldc + i]; } } } } else { if (transA == CBlasNoTrans) { #pragma omp parallel for for (size_t j = 0; j < n; j++) { if (beta == zero) { for (size_t i = 0; i < m; i++) C[j * ldc + i] = zero; } else if (beta != one) { for (size_t i = 0; i < m; i++) C[j * ldc + i] *= beta; } for (size_t l = 0; l < k; l++) { if (B[l * ldb + j] != zero) { register double complex temp = alpha * B[l * ldb + j]; for (size_t i = 0; i < m; i++) C[j * ldc + i] += temp * A[l * lda + i]; } } } } else if (transA == CBlasConjTrans) { #pragma omp parallel for for (size_t j = 0; j < n; j++) { for (size_t i = 0; i < m; i++) { register double complex temp = zero; for (size_t l = 0; l < k; l++) temp += conj(A[i * lda + l]) * B[l * ldb + j]; if (beta == zero) C[j * ldc + i] = alpha * temp; else C[j * ldc + i] = alpha * temp + beta * C[j * ldc + i]; } } } else { #pragma omp parallel for for (size_t j = 0; j < n; j++) { for (size_t i = 0; i < m; i++) { register double complex temp = zero; for (size_t l = 0; l < k; l++) temp += A[i * lda + l] * B[l * ldb + j]; if (beta == zero) C[j * ldc + i] = alpha * temp; else C[j * ldc + i] = alpha * temp + beta * C[j * ldc + i]; } } } } } CUresult cuZgemm2(CUBLAShandle handle, CBlasTranspose transA, CBlasTranspose transB, size_t m, size_t n, size_t k, double complex alpha, CUdeviceptr A, size_t lda, CUdeviceptr B, size_t ldb, double complex beta, CUdeviceptr C, size_t ldc, CUdeviceptr D, size_t ldd, CUstream stream) { const size_t nRowA = (transA == CBlasNoTrans) ? m : k; const size_t nRowB = (transB == CBlasNoTrans) ? k : n; int info = 0; if (lda < nRowA) info = 8; else if (ldb < nRowB) info = 10; else if (ldc < m) info = 13; else if (ldd < m) info = 15; if (info != 0) { XERBLA(info); return CUDA_ERROR_INVALID_VALUE; } if (m == 0 || n == 0 || (C == D && (alpha == zero || k == 0) && beta == one)) return CUDA_SUCCESS; CU_ERROR_CHECK(cuCtxPushCurrent(handle->context)); if (handle->zgemm2 == NULL) CU_ERROR_CHECK(cuModuleLoadData(&handle->zgemm2, imageBytes)); unsigned int mb, nb, kb, bx, by; char name[96]; if (transA == CBlasNoTrans) { mb = 64; nb = 4; kb = 16; bx = (transB == CBlasNoTrans) ? 16 : 4; by = (transB == CBlasNoTrans) ? 4 : 16; snprintf(name, 91, "_Z7zgemm2NIL14CBlasTranspose%dELj64ELj4ELj16ELj%uELj%uEEv7double2S1_PKS1_S3_S3_PS1_iiiiiii", transB, bx, by); } else { mb = 8; nb = (transB == CBlasNoTrans) ? 8 : 16; kb = (transB == CBlasNoTrans) ? 4 : 8; bx = (transB == CBlasNoTrans) ? 
4 : 8; by = 8; snprintf(name, 96, "_Z7zgemm2TIL14CBlasTranspose%dELS0_%dELj8ELj%uELj%uELj%uELj8EEv7double2S1_PKS1_S3_S3_PS1_iiiiiii", transA, transB, nb, kb, bx); } CUfunction function; CU_ERROR_CHECK(cuModuleGetFunction(&function, handle->zgemm2, name)); void * params[] = { &A, &B, &C, &D, &alpha, &beta, &lda, &ldb, &ldc, &ldd, &m, &n, &k }; CU_ERROR_CHECK(cuLaunchKernel(function, (unsigned int)(m + mb - 1) / mb, (unsigned int)(n + nb - 1) / nb, 1, bx, by, 1, 0, stream, params, NULL)); CU_ERROR_CHECK(cuCtxPopCurrent(&handle->context)); return CUDA_SUCCESS; } struct zgemm_args { CUBLAShandle handle; const double complex * A, * B; double complex * C; size_t m, n, k, lda, ldb, ldc; double complex alpha, beta; CBlasTranspose transA, transB; }; static CUresult background_zgemm(const void * a) { struct zgemm_args * args = (struct zgemm_args *)a; CUBLAShandle handle = args->handle; const size_t mb = (args->transA == CBlasNoTrans) ? ZGEMM_N_MB : ((args->transB == CBlasNoTrans) ? ZGEMM_CN_MB : ZGEMM_CC_MB); const size_t nb = (args->transA == CBlasNoTrans) ? ZGEMM_N_NB : ((args->transB == CBlasNoTrans) ? ZGEMM_CN_NB : ZGEMM_CC_NB); const size_t kb = (args->transA == CBlasNoTrans) ? ZGEMM_N_KB : ((args->transB == CBlasNoTrans) ? ZGEMM_CN_KB : ZGEMM_CC_KB); // Temporary device memory and streams CUdeviceptr A0, A1, B0, B1, C; size_t lda, ldb, ldc; CUstream copy, compute; // Allocate two matrices for blocks of A and B on the device and one for a // block of C if (args->transA == CBlasNoTrans) { CU_ERROR_CHECK(cuMemAllocPitch(&A0, &lda, mb * sizeof(double complex), kb, sizeof(double complex))); CU_ERROR_CHECK(cuMemAllocPitch(&A1, &lda, mb * sizeof(double complex), kb, sizeof(double complex))); } else { CU_ERROR_CHECK(cuMemAllocPitch(&A0, &lda, kb * sizeof(double complex), mb, sizeof(double complex))); CU_ERROR_CHECK(cuMemAllocPitch(&A1, &lda, kb * sizeof(double complex), mb, sizeof(double complex))); } lda /= sizeof(double complex); if (args->transB == CBlasNoTrans) { CU_ERROR_CHECK(cuMemAllocPitch(&B0, &ldb, kb * sizeof(double complex), nb, sizeof(double complex))); CU_ERROR_CHECK(cuMemAllocPitch(&B1, &ldb, kb * sizeof(double complex), nb, sizeof(double complex))); } else { CU_ERROR_CHECK(cuMemAllocPitch(&B0, &ldb, nb * sizeof(double complex), kb, sizeof(double complex))); CU_ERROR_CHECK(cuMemAllocPitch(&B1, &ldb, nb * sizeof(double complex), kb, sizeof(double complex))); } ldb /= sizeof(double complex); CU_ERROR_CHECK(cuMemAllocPitch(&C, &ldc, mb * sizeof(double complex), nb, sizeof(double complex))); ldc /= sizeof(double complex); // Create streams CU_ERROR_CHECK(cuStreamCreate(&copy, CU_STREAM_NON_BLOCKING)); CU_ERROR_CHECK(cuStreamCreate(&compute, CU_STREAM_NON_BLOCKING)); // Copy C onto the device using the compute stream CU_ERROR_CHECK(cuMemcpyHtoD2DAsync(C, ldc, 0, 0, args->C, args->ldc, 0, 0, args->m, args->n, sizeof(double complex), compute)); // Perform C *= beta on the compute stream to ensure C has finished copying CU_ERROR_CHECK(cuZgemm(handle, CBlasNoTrans, CBlasNoTrans, args->m, args->n, 0, zero, 0, ldc, 0, 0, args->beta, C, ldc, compute)); // Can exit early if alpha * op(A) * op(B) will evaluate to zero if (args->alpha != zero && args->k > 0) { // Perform C += alpha * op(A) * op(B) if (args->transB == CBlasNoTrans) { if (args->transA == CBlasNoTrans) { // Copy A and B onto the device asynchronously on the same stream as C const size_t lb = min(args->k, kb); CU_ERROR_CHECK(cuMemcpyHtoD2DAsync(A0, lda, 0, 0, args->A, args->lda, 0, 0, args->m, lb, sizeof(double complex), compute)); 
CU_ERROR_CHECK(cuMemcpyHtoD2DAsync(B0, ldb, 0, 0, args->B, args->ldb, 0, 0, lb, args->n, sizeof(double complex), compute)); for (size_t l = 0; l < args->k; l += kb) { // Compute C on the same stream as the copies to ensure they have finished first CU_ERROR_CHECK(cuZgemm(handle, args->transA, args->transB, args->m, args->n, min(args->k - l, kb), args->alpha, A0, lda, B0, ldb, one, C, ldc, compute)); // If there is more work to do if (l + kb < args->k) { const size_t lb = min(args->k - l - kb, kb); // Copy the next blocks of A and B on the opposite stream from the zgemm CU_ERROR_CHECK(cuMemcpyHtoD2DAsync(A1, lda, 0, 0, args->A, args->lda, 0, l + kb, args->m, lb, sizeof(double complex), copy)); CU_ERROR_CHECK(cuMemcpyHtoD2DAsync(B1, ldb, 0, 0, args->B, args->ldb, l + kb, 0, lb, args->n, sizeof(double complex), copy)); // Swap the streams and pointers so that the compute starts after the copy CUstream stream = compute; compute = copy; copy = stream; CUdeviceptr ptr = A0; A0 = A1; A1 = ptr; ptr = B0; B0 = B1; B1 = ptr; } } } else { // Copy A and B onto the device asynchronously on the same stream as C const size_t lb = min(args->k, kb); CU_ERROR_CHECK(cuMemcpyHtoD2DAsync(A0, lda, 0, 0, args->A, args->lda, 0, 0, lb, args->m, sizeof(double complex), compute)); CU_ERROR_CHECK(cuMemcpyHtoD2DAsync(B0, ldb, 0, 0, args->B, args->ldb, 0, 0, lb, args->n, sizeof(double complex), compute)); for (size_t l = 0; l < args->k; l += kb) { // Compute C on the same stream as the copies to ensure they have finished first CU_ERROR_CHECK(cuZgemm(handle, args->transA, args->transB, args->m, args->n, min(args->k - l, kb), args->alpha, A0, lda, B0, ldb, one, C, ldc, compute)); // If there is more work to do if (l + kb < args->k) { const size_t lb = min(args->k - l - kb, kb); // Copy the next blocks of A and B on the opposite stream from the zgemm CU_ERROR_CHECK(cuMemcpyHtoD2DAsync(A1, lda, 0, 0, args->A, args->lda, l + kb, 0, lb, args->m, sizeof(double complex), copy)); CU_ERROR_CHECK(cuMemcpyHtoD2DAsync(B1, ldb, 0, 0, args->B, args->ldb, l + kb, 0, lb, args->n, sizeof(double complex), copy)); // Swap the streams and pointers so that the compute starts after the copy CUstream stream = compute; compute = copy; copy = stream; CUdeviceptr ptr = A0; A0 = A1; A1 = ptr; ptr = B0; B0 = B1; B1 = ptr; } } } } else { if (args->transA == CBlasNoTrans) { // Copy A and B onto the device asynchronously on the same stream as C const size_t lb = min(args->k, kb); CU_ERROR_CHECK(cuMemcpyHtoD2DAsync(A0, lda, 0, 0, args->A, args->lda, 0, 0, args->m, lb, sizeof(double complex), compute)); CU_ERROR_CHECK(cuMemcpyHtoD2DAsync(B0, ldb, 0, 0, args->B, args->ldb, 0, 0, args->n, lb, sizeof(double complex), compute)); for (size_t l = 0; l < args->k; l += kb) { // Compute C on the same stream as the copies to ensure they have finished first CU_ERROR_CHECK(cuZgemm(handle, args->transA, args->transB, args->m, args->n, min(args->k - l, kb), args->alpha, A0, lda, B0, ldb, one, C, ldc, compute)); // If there is more work to do if (l + kb < args->k) { const size_t lb = min(args->k - l - kb, kb); // Copy the next blocks of A and B on the opposite stream from the zgemm CU_ERROR_CHECK(cuMemcpyHtoD2DAsync(A1, lda, 0, 0, args->A, args->lda, 0, l + kb, args->m, lb, sizeof(double complex), copy)); CU_ERROR_CHECK(cuMemcpyHtoD2DAsync(B1, ldb, 0, 0, args->B, args->ldb, 0, l + kb, args->n, lb, sizeof(double complex), copy)); // Swap the streams and pointers so that the compute starts after the copy CUstream stream = compute; compute = copy; copy = stream; 
CUdeviceptr ptr = A0; A0 = A1; A1 = ptr; ptr = B0; B0 = B1; B1 = ptr; } } } else { // Copy A and B onto the device asynchronously on the same stream as C const size_t lb = min(args->k, kb); CU_ERROR_CHECK(cuMemcpyHtoD2DAsync(A0, lda, 0, 0, args->A, args->lda, 0, 0, lb, args->m, sizeof(double complex), compute)); CU_ERROR_CHECK(cuMemcpyHtoD2DAsync(B0, ldb, 0, 0, args->B, args->ldb, 0, 0, args->n, lb, sizeof(double complex), compute)); for (size_t l = 0; l < args->k; l += kb) { // Compute C on the same stream as the copies to ensure they have finished first CU_ERROR_CHECK(cuZgemm(handle, args->transA, args->transB, args->m, args->n, min(args->k - l, kb), args->alpha, A0, lda, B0, ldb, one, C, ldc, compute)); // If there is more work to do if (l + kb < args->k) { const size_t lb = min(args->k - l - kb, kb); // Copy the next blocks of A and B on the opposite stream from the zgemm CU_ERROR_CHECK(cuMemcpyHtoD2DAsync(A1, lda, 0, 0, args->A, args->lda, l + kb, 0, lb, args->m, sizeof(double complex), copy)); CU_ERROR_CHECK(cuMemcpyHtoD2DAsync(B1, ldb, 0, 0, args->B, args->ldb, 0, l + kb, args->n, lb, sizeof(double complex), copy)); // Swap the streams and pointers so that the compute starts after the copy CUstream stream = compute; compute = copy; copy = stream; CUdeviceptr ptr = A0; A0 = A1; A1 = ptr; ptr = B0; B0 = B1; B1 = ptr; } } } } } // Copy C back onto the host on the compute stream CU_ERROR_CHECK(cuMemcpyDtoH2DAsync(args->C, args->ldc, 0, 0, C, ldc, 0, 0, args->m, args->n, sizeof(double complex), compute)); // Clean up temporary memory and streams CU_ERROR_CHECK(cuMemFree(A0)); CU_ERROR_CHECK(cuMemFree(A1)); CU_ERROR_CHECK(cuMemFree(B0)); CU_ERROR_CHECK(cuMemFree(B1)); CU_ERROR_CHECK(cuMemFree(C)); CU_ERROR_CHECK(cuStreamDestroy(copy)); CU_ERROR_CHECK(cuStreamDestroy(compute)); return CUDA_SUCCESS; } CUresult cuMultiGPUZgemm(CUmultiGPUBLAShandle handle, CBlasTranspose transA, CBlasTranspose transB, size_t m, size_t n, size_t k, double complex alpha, const double complex * restrict A, size_t lda, const double complex * restrict B, size_t ldb, double complex beta, double complex * restrict C, size_t ldc) { const size_t nRowA = (transA == CBlasNoTrans) ? m : k; const size_t nRowB = (transB == CBlasNoTrans) ? k : n; int info = 0; if (lda < nRowA) info = 8; else if (ldb < nRowB) info = 10; else if (ldc < m) info = 13; if (info != 0) { XERBLA(info); return CUDA_ERROR_INVALID_VALUE; } if (m == 0 || n == 0 || ((alpha == zero || k == 0) && beta == one)) return CUDA_SUCCESS; if (alpha == zero) { if (beta == zero) { #pragma omp parallel for for (size_t j = 0; j < n; j++) { for (size_t i = 0; i < m; i++) C[j * ldc + i] = zero; } } else { for (size_t j = 0; j < n; j++) { #pragma omp parallel for for (size_t i = 0; i < m; i++) C[j * ldc + i] *= beta; } } return CUDA_SUCCESS; } const size_t mb = (transA == CBlasNoTrans) ? ZGEMM_N_MB : ((transB == CBlasNoTrans) ? ZGEMM_CN_MB : ZGEMM_CC_MB); const size_t nb = (transA == CBlasNoTrans) ? ZGEMM_N_NB : ((transB == CBlasNoTrans) ? 
ZGEMM_CN_NB : ZGEMM_CC_NB); if (m < mb && n < nb) { zgemm(transA, transB, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); return CUDA_SUCCESS; } int task = 0, nTasks = (int)(((m + mb - 1) / mb) * ((n + nb - 1) / nb)); CUtask tasks[nTasks]; int ctx = 0; int nCtxs = cuMultiGPUGetContextCount(handle->mGPU); struct zgemm_args args = { .transA = transA, .transB = transB, .k = k, .alpha = alpha, .lda = lda, .ldb = ldb, .beta = beta, .ldc = ldc }; if (transB == CBlasNoTrans) { if (transA == CBlasNoTrans) { for (size_t j = 0; j < n; j += nb) { args.n = min(n - j, nb); for (size_t i = 0; i < m; i += mb) { args.m = min(m - i, mb); args.A = &A[i]; args.B = &B[j * ldb]; args.C = &C[j * ldc + i]; args.handle = &handle->handles[ctx]; CU_ERROR_CHECK(cuTaskCreate(&tasks[task], background_zgemm, &args, sizeof(struct zgemm_args))); CU_ERROR_CHECK(cuMultiGPURunTask(handle->mGPU, ctx++, tasks[task++])); if (ctx == nCtxs) ctx = 0; } } } else { for (size_t j = 0; j < n; j += nb) { args.n = min(n - j, nb); for (size_t i = 0; i < m; i += mb) { args.m = min(m - i, mb); args.A = &A[i * lda]; args.B = &B[j * ldb]; args.C = &C[j * ldc + i]; args.handle = &handle->handles[ctx]; CU_ERROR_CHECK(cuTaskCreate(&tasks[task], background_zgemm, &args, sizeof(struct zgemm_args))); CU_ERROR_CHECK(cuMultiGPURunTask(handle->mGPU, ctx++, tasks[task++])); if (ctx == nCtxs) ctx = 0; } } } } else { if (transA == CBlasNoTrans) { for (size_t j = 0; j < n; j += nb) { args.n = min(n - j, nb); for (size_t i = 0; i < m; i += mb) { args.m = min(m - i, mb); args.A = &A[i]; args.B = &B[j]; args.C = &C[j * ldc + i]; args.handle = &handle->handles[ctx]; CU_ERROR_CHECK(cuTaskCreate(&tasks[task], background_zgemm, &args, sizeof(struct zgemm_args))); CU_ERROR_CHECK(cuMultiGPURunTask(handle->mGPU, ctx++, tasks[task++])); if (ctx == nCtxs) ctx = 0; } } } else { for (size_t j = 0; j < n; j += nb) { args.n = min(n - j, nb); for (size_t i = 0; i < m; i += mb) { args.m = min(m - i, mb); args.A = &A[i * lda]; args.B = &B[j]; args.C = &C[j * ldc + i]; args.handle = &handle->handles[ctx]; CU_ERROR_CHECK(cuTaskCreate(&tasks[task], background_zgemm, &args, sizeof(struct zgemm_args))); CU_ERROR_CHECK(cuMultiGPURunTask(handle->mGPU, ctx++, tasks[task++])); if (ctx == nCtxs) ctx = 0; } } } } CUresult result; for (task = 0; task < nTasks; task++) CU_ERROR_CHECK(cuTaskDestroy(tasks[task], &result)); return result; }
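/* Usage sketch (not part of the original zgemm.c): calling the host zgemm
   above for a small column-major product C = alpha*A*B + beta*C. Assumes the
   CBlasNoTrans enumerator from "blas.h"; dimensions and values are arbitrary. */
#include <complex.h>

static void zgemm_smoke_test(void)
{
    double complex A[4] = { 1.0 + 1.0 * I, 0.0, 0.0, 1.0 - 1.0 * I }; /* 2x2, lda = 2 */
    double complex B[4] = { 2.0, 0.0, 0.0, 2.0 };                     /* 2x2, ldb = 2 */
    double complex C[4] = { 0.0, 0.0, 0.0, 0.0 };                     /* 2x2, ldc = 2 */

    zgemm(CBlasNoTrans, CBlasNoTrans, 2, 2, 2,
          1.0 + 0.0 * I, A, 2, B, 2,
          0.0 + 0.0 * I, C, 2);
    /* since B = 2*I, C now holds 2*A */
}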
flops_SSE2.h
/* flops_SSE2.h - SSE2 Benchmarks * * Author : Alexander J. Yee * Date Created : 10/21/2011 * Last Modified : 01/25/2012 * * * * And of course... The typical copyright stuff... * * Redistribution of this program in both source or binary, regardless of * form, with or without modification is permitted as long as the following * conditions are met: * 1. This copyright notice is maintained either inline in the source * or distributed with the binary. * 2. A list of all contributing authors along with their contributions * is included either inline in the source or distributed with the * binary. * 3. The following disclaimer is maintained either inline in the * source or distributed with the binary. * * Disclaimer: * This software is provided "as is", without any guarantee made to its * suitability or fitness for any particular use. It may contain bugs so use * of this program is at your own risk. I take no responsibility for any * damage that may unintentionally be caused through its use. */ #ifndef _SSE2_h #define _SSE2_h #include <emmintrin.h> #include "flops.h" //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// double test_dp_add_SSE2_internal(double x, double y, size_t iterations){ register __m128d r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, rA, rB; r0 = _mm_set1_pd(x); r1 = _mm_set1_pd(y); r8 = _mm_set1_pd(-0.0); r9 = _mm_set1_pd(0.5); r2 = _mm_xor_pd(r0, r8); r3 = _mm_or_pd(r0, r8); r4 = _mm_andnot_pd(r8, r0); r5 = _mm_mul_pd(r1, r9); r6 = _mm_add_pd(r1, r9); r7 = _mm_sub_pd(r1, r9); r8 = _mm_add_pd(r0, _mm_set1_pd(2.3)); r9 = _mm_sub_pd(r1, _mm_set1_pd(2.3)); uint64 iMASK = 0x800fffffffffffffull; __m128d MASK = _mm_set1_pd(*(double*)&iMASK); __m128d vONE = _mm_set1_pd(1.0); rA = _mm_set1_pd(0.1); rB = _mm_set1_pd(0.1001); // wclk start = wclk_now(); size_t c = 0; while (c < iterations){ size_t i = 0; while (i < 1000){ r0 = _mm_add_pd(r0, rA); r1 = _mm_add_pd(r1, rA); r2 = _mm_add_pd(r2, rA); r3 = _mm_add_pd(r3, rA); r4 = _mm_add_pd(r4, rA); r5 = _mm_add_pd(r5, rA); r6 = _mm_add_pd(r6, rA); r7 = _mm_add_pd(r7, rA); r8 = _mm_add_pd(r8, rA); r9 = _mm_add_pd(r9, rA); r0 = _mm_sub_pd(r0, rB); r1 = _mm_sub_pd(r1, rB); r2 = _mm_sub_pd(r2, rB); r3 = _mm_sub_pd(r3, rB); r4 = _mm_sub_pd(r4, rB); r5 = _mm_sub_pd(r5, rB); r6 = _mm_sub_pd(r6, rB); r7 = _mm_sub_pd(r7, rB); r8 = _mm_sub_pd(r8, rB); r9 = _mm_sub_pd(r9, rB); //r8 = _mm_add_pd(r0, r1); //r9 = _mm_add_pd(r2, r3); //rA = _mm_add_pd(r4, r5); //rB = _mm_add_pd(r6, r7); //r0 = _mm_sub_pd(r0, r4); //r1 = _mm_sub_pd(r1, r5); //r2 = _mm_sub_pd(r2, r6); //r3 = _mm_sub_pd(r3, r7); //r4 = _mm_add_pd(r4, r8); //r5 = _mm_add_pd(r5, r9); //r6 = _mm_add_pd(r6, rA); //r7 = _mm_add_pd(r7, rB); i++; } //print(r0); //print(r1); //print(r2); //print(r3); //print(r4); //print(r5); //print(r6); //print(r7); //cout << endl; r0 = _mm_and_pd(r0, MASK); r1 = _mm_and_pd(r1, MASK); r2 = _mm_and_pd(r2, MASK); r3 = _mm_and_pd(r3, MASK); r4 = _mm_and_pd(r4, MASK); r5 = _mm_and_pd(r5, MASK); r6 = _mm_and_pd(r6, MASK); r7 = _mm_and_pd(r7, MASK); r8 = _mm_and_pd(r8, MASK); r9 = _mm_and_pd(r9, MASK); r0 = _mm_or_pd(r0, vONE); r1 = _mm_or_pd(r1, vONE); r2 = _mm_or_pd(r2, vONE); r3 = _mm_or_pd(r3, vONE); r4 = _mm_or_pd(r4, vONE); r5 = _mm_or_pd(r5, vONE); r6 = _mm_or_pd(r6, vONE); r7 = _mm_or_pd(r7, vONE); r8 
= _mm_or_pd(r8, vONE); r9 = _mm_or_pd(r9, vONE); c++; } // wclk end = wclk_now(); // double secs = wclk_secs_since(start); // uint64 ops = 12 * 1000 * c * 2; // cout << "Seconds = " << secs << endl; // cout << "FP Ops = " << ops << endl; // cout << "FLOPs = " << ops / secs << endl; r0 = _mm_add_pd(r0, r1); r2 = _mm_add_pd(r2, r3); r4 = _mm_add_pd(r4, r5); r6 = _mm_add_pd(r6, r7); r8 = _mm_add_pd(r8, r9); r0 = _mm_add_pd(r0, r2); r4 = _mm_add_pd(r4, r6); r0 = _mm_add_pd(r0, r4); r0 = _mm_add_pd(r0, r8); double out = 0; __m128d tmp = r0; out += ((double*)&tmp)[0]; out += ((double*)&tmp)[1]; return out; } void test_dp_add_SSE2(int tds, size_t iterations){ printf("Testing SSE2 Add:\n"); double *sum = (double*)malloc(tds * sizeof(double)); wclk start = wclk_now(); #pragma omp parallel num_threads(tds) { double ret = test_dp_add_SSE2_internal(1.1, 2.1, iterations); sum[omp_get_thread_num()] = ret; } double secs = wclk_secs_since(start); uint64 ops = 20 * 1000 * iterations * tds * 2; printf("Seconds = %g\n", secs); printf("FP Ops = %llu\n", (unsigned long long)ops); printf("FLOPs = %g\n", ops / secs); double out = 0; int c = 0; while (c < tds){ out += sum[c++]; } printf("sum = %g\n\n", out); free(sum); } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// double test_dp_mul_SSE2_internal(double x, double y, size_t iterations){ register __m128d r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, rA, rB; r0 = _mm_set1_pd(x); r1 = _mm_set1_pd(y); r8 = _mm_set1_pd(-0.0); r2 = _mm_xor_pd(r0, r8); r3 = _mm_or_pd(r0, r8); r4 = _mm_andnot_pd(r8, r0); r5 = _mm_mul_pd(r1, _mm_set1_pd(0.37796447300922722721)); r6 = _mm_mul_pd(r1, _mm_set1_pd(0.24253562503633297352)); r7 = _mm_mul_pd(r1, _mm_set1_pd(4.1231056256176605498)); r8 = _mm_add_pd(r0, _mm_set1_pd(2.3)); r9 = _mm_sub_pd(r1, _mm_set1_pd(2.3)); // r8 = _mm_set1_pd(1.4142135623730950488); // r9 = _mm_set1_pd(1.7320508075688772935); // rA = _mm_set1_pd(0.57735026918962576451); // rB = _mm_set1_pd(0.70710678118654752440); rA = _mm_set1_pd(1.4142135623730950488); rB = _mm_set1_pd(0.70710678118654752440); uint64 iMASK = 0x800fffffffffffffull; __m128d MASK = _mm_set1_pd(*(double*)&iMASK); __m128d vONE = _mm_set1_pd(1.0); size_t c = 0; while (c < iterations){ size_t i = 0; while (i < 1000){ r0 = _mm_mul_pd(r0, rA); r1 = _mm_mul_pd(r1, rA); r2 = _mm_mul_pd(r2, rA); r3 = _mm_mul_pd(r3, rA); r4 = _mm_mul_pd(r4, rA); r5 = _mm_mul_pd(r5, rA); r6 = _mm_mul_pd(r6, rA); r7 = _mm_mul_pd(r7, rA); r8 = _mm_mul_pd(r8, rA); r9 = _mm_mul_pd(r9, rA); r0 = _mm_mul_pd(r0, rB); r1 = _mm_mul_pd(r1, rB); r2 = _mm_mul_pd(r2, rB); r3 = _mm_mul_pd(r3, rB); r4 = _mm_mul_pd(r4, rB); r5 = _mm_mul_pd(r5, rB); r6 = _mm_mul_pd(r6, rB); r7 = _mm_mul_pd(r7, rB); r8 = _mm_mul_pd(r8, rB); r9 = _mm_mul_pd(r9, rB); i++; } //print(r0); //print(r1); //print(r2); //print(r3); //print(r4); //print(r5); //print(r6); //print(r7); //cout << endl; r0 = _mm_and_pd(r0, MASK); r1 = _mm_and_pd(r1, MASK); r2 = _mm_and_pd(r2, MASK); r3 = _mm_and_pd(r3, MASK); r4 = _mm_and_pd(r4, MASK); r5 = _mm_and_pd(r5, MASK); r6 = _mm_and_pd(r6, MASK); r7 = _mm_and_pd(r7, MASK); r8 = _mm_and_pd(r8, MASK); r9 = _mm_and_pd(r9, MASK); r0 = _mm_or_pd(r0, vONE); r1 = _mm_or_pd(r1, vONE); r2 = _mm_or_pd(r2, vONE); r3 = _mm_or_pd(r3, vONE); r4 = _mm_or_pd(r4, vONE); r5 = 
_mm_or_pd(r5, vONE); r6 = _mm_or_pd(r6, vONE); r7 = _mm_or_pd(r7, vONE); r8 = _mm_or_pd(r8, vONE); r9 = _mm_or_pd(r9, vONE); c++; } // wclk end = wclk_now(); // double secs = wclk_secs_since(start); // uint64 ops = 12 * 1000 * c * 2; // cout << "Seconds = " << secs << endl; // cout << "FP Ops = " << ops << endl; // cout << "FLOPs = " << ops / secs << endl; r0 = _mm_add_pd(r0, r1); r2 = _mm_add_pd(r2, r3); r4 = _mm_add_pd(r4, r5); r6 = _mm_add_pd(r6, r7); r8 = _mm_add_pd(r8, r9); r0 = _mm_add_pd(r0, r2); r4 = _mm_add_pd(r4, r6); r0 = _mm_add_pd(r0, r4); r0 = _mm_add_pd(r0, r8); double out = 0; __m128d tmp = r0; out += ((double*)&tmp)[0]; out += ((double*)&tmp)[1]; return out; } void test_dp_mul_SSE2(int tds, size_t iterations){ printf("Testing SSE2 Mul:\n"); double *sum = (double*)malloc(tds * sizeof(double)); wclk start = wclk_now(); #pragma omp parallel num_threads(tds) { double ret = test_dp_mul_SSE2_internal(1.1, 2.1, iterations); sum[omp_get_thread_num()] = ret; } double secs = wclk_secs_since(start); uint64 ops = 20 * 1000 * iterations * tds * 2; printf("Seconds = %g\n", secs); printf("FP Ops = %llu\n", (unsigned long long)ops); printf("FLOPs = %g\n", ops / secs); double out = 0; int c = 0; while (c < tds){ out += sum[c++]; } printf("sum = %g\n\n", out); free(sum); } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// double test_dp_mac_SSE2_internal(double x, double y, size_t iterations){ register __m128d r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, rA, rB, rC, rD, rE, rF; r0 = _mm_set1_pd(x); r1 = _mm_set1_pd(y); r8 = _mm_set1_pd(-0.0); r2 = _mm_xor_pd(r0, r8); r3 = _mm_or_pd(r0, r8); r4 = _mm_andnot_pd(r8, r0); r5 = _mm_mul_pd(r1, _mm_set1_pd(0.37796447300922722721)); r6 = _mm_mul_pd(r1, _mm_set1_pd(0.24253562503633297352)); r7 = _mm_mul_pd(r1, _mm_set1_pd(4.1231056256176605498)); r8 = _mm_add_pd(r0, _mm_set1_pd(0.37796447300922722721)); r9 = _mm_add_pd(r1, _mm_set1_pd(0.24253562503633297352)); rA = _mm_sub_pd(r0, _mm_set1_pd(4.1231056256176605498)); rB = _mm_sub_pd(r1, _mm_set1_pd(4.1231056256176605498)); rC = _mm_set1_pd(1.4142135623730950488); rD = _mm_set1_pd(1.7320508075688772935); rE = _mm_set1_pd(0.57735026918962576451); rF = _mm_set1_pd(0.70710678118654752440); uint64 iMASK = 0x800fffffffffffffull; __m128d MASK = _mm_set1_pd(*(double*)&iMASK); __m128d vONE = _mm_set1_pd(1.0); size_t c = 0; while (c < iterations){ size_t i = 0; while (i < 1000){ r0 = _mm_mul_pd(r0, rC); r1 = _mm_add_pd(r1, rD); r2 = _mm_mul_pd(r2, rE); r3 = _mm_sub_pd(r3, rF); r4 = _mm_mul_pd(r4, rC); r5 = _mm_add_pd(r5, rD); r6 = _mm_mul_pd(r6, rE); r7 = _mm_sub_pd(r7, rF); r8 = _mm_mul_pd(r8, rC); r9 = _mm_add_pd(r9, rD); rA = _mm_mul_pd(rA, rE); rB = _mm_sub_pd(rB, rF); r0 = _mm_add_pd(r0, rF); r1 = _mm_mul_pd(r1, rE); r2 = _mm_sub_pd(r2, rD); r3 = _mm_mul_pd(r3, rC); r4 = _mm_add_pd(r4, rF); r5 = _mm_mul_pd(r5, rE); r6 = _mm_sub_pd(r6, rD); r7 = _mm_mul_pd(r7, rC); r8 = _mm_add_pd(r8, rF); r9 = _mm_mul_pd(r9, rE); rA = _mm_sub_pd(rA, rD); rB = _mm_mul_pd(rB, rC); r0 = _mm_mul_pd(r0, rC); r1 = _mm_add_pd(r1, rD); r2 = _mm_mul_pd(r2, rE); r3 = _mm_sub_pd(r3, rF); r4 = _mm_mul_pd(r4, rC); r5 = _mm_add_pd(r5, rD); r6 = _mm_mul_pd(r6, rE); r7 = _mm_sub_pd(r7, rF); r8 = _mm_mul_pd(r8, rC); r9 = _mm_add_pd(r9, rD); rA = _mm_mul_pd(rA, rE); rB = 
_mm_sub_pd(rB, rF); r0 = _mm_add_pd(r0, rF); r1 = _mm_mul_pd(r1, rE); r2 = _mm_sub_pd(r2, rD); r3 = _mm_mul_pd(r3, rC); r4 = _mm_add_pd(r4, rF); r5 = _mm_mul_pd(r5, rE); r6 = _mm_sub_pd(r6, rD); r7 = _mm_mul_pd(r7, rC); r8 = _mm_add_pd(r8, rF); r9 = _mm_mul_pd(r9, rE); rA = _mm_sub_pd(rA, rD); rB = _mm_mul_pd(rB, rC); i++; } //print(r0); //print(r1); //print(r2); //print(r3); //print(r4); //print(r5); //print(r6); //print(r7); //print(r8); //print(r9); //print(rA); //print(rB); //cout << endl; r0 = _mm_and_pd(r0, MASK); r1 = _mm_and_pd(r1, MASK); r2 = _mm_and_pd(r2, MASK); r3 = _mm_and_pd(r3, MASK); r4 = _mm_and_pd(r4, MASK); r5 = _mm_and_pd(r5, MASK); r6 = _mm_and_pd(r6, MASK); r7 = _mm_and_pd(r7, MASK); r8 = _mm_and_pd(r8, MASK); r9 = _mm_and_pd(r9, MASK); rA = _mm_and_pd(rA, MASK); rB = _mm_and_pd(rB, MASK); r0 = _mm_or_pd(r0, vONE); r1 = _mm_or_pd(r1, vONE); r2 = _mm_or_pd(r2, vONE); r3 = _mm_or_pd(r3, vONE); r4 = _mm_or_pd(r4, vONE); r5 = _mm_or_pd(r5, vONE); r6 = _mm_or_pd(r6, vONE); r7 = _mm_or_pd(r7, vONE); r8 = _mm_or_pd(r8, vONE); r9 = _mm_or_pd(r9, vONE); rA = _mm_or_pd(rA, vONE); rB = _mm_or_pd(rB, vONE); c++; } // wclk end = wclk_now(); // double secs = wclk_secs_since(start); // uint64 ops = 12 * 1000 * c * 2; // cout << "Seconds = " << secs << endl; // cout << "FP Ops = " << ops << endl; // cout << "FLOPs = " << ops / secs << endl; r0 = _mm_add_pd(r0, r1); r2 = _mm_add_pd(r2, r3); r4 = _mm_add_pd(r4, r5); r6 = _mm_add_pd(r6, r7); r8 = _mm_add_pd(r8, r9); rA = _mm_add_pd(rA, rB); r0 = _mm_add_pd(r0, r2); r4 = _mm_add_pd(r4, r6); r8 = _mm_add_pd(r8, rA); r0 = _mm_add_pd(r0, r4); r0 = _mm_add_pd(r0, r8); double out = 0; __m128d tmp = r0; out += ((double*)&tmp)[0]; out += ((double*)&tmp)[1]; return out; } void test_dp_mac_SSE2(int tds, size_t iterations){ printf("Testing SSE2 Mul + Add:\n"); double *sum = (double*)malloc(tds * sizeof(double)); wclk start = wclk_now(); #pragma omp parallel num_threads(tds) { double ret = test_dp_mac_SSE2_internal(1.1, 2.1, iterations); sum[omp_get_thread_num()] = ret; } double secs = wclk_secs_since(start); uint64 ops = 48 * 1000 * iterations * tds * 2; printf("Seconds = %g\n", secs); printf("FP Ops = %llu\n", (unsigned long long)ops); printf("FLOPs = %g\n", ops / secs); double out = 0; int c = 0; while (c < tds){ out += sum[c++]; } printf("sum = %g\n\n", out); free(sum); } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// #endif
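/* Hypothetical driver (not part of flops_SSE2.h): running the three SSE2
   benchmarks above. Assumes flops.h supplies the wclk/uint64 utilities used
   by the tests; the thread and iteration counts are arbitrary choices. */
#include <omp.h>

static void run_sse2_suite(void)
{
    int threads = omp_get_max_threads();
    size_t iterations = 100000; /* scale to taste; runtime grows linearly */

    test_dp_add_SSE2(threads, iterations);
    test_dp_mul_SSE2(threads, iterations);
    test_dp_mac_SSE2(threads, iterations);
}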
P014.c
/*14. Design, develop and execute a parallel program in C to determine and
print the prime numbers which are less than 100 making use of the algorithm
of the Sieve of Eratosthenes. */
#include<stdio.h>
#include<math.h>
#include<omp.h>

int main()
{
    int num[120], i, j, n;

    printf("enter the value of n\n");
    scanf("%d", &n);
    if (n < 2 || n > 119) { /* num[] holds at most 120 entries */
        printf("n must be between 2 and 119\n");
        return 1;
    }

    #pragma omp parallel for
    for (i = 0; i <= n; i++) {
        num[i] = i;
    }
    num[0] = num[1] = 0; /* 0 and 1 are not prime */

    /* Marking composites is idempotent, so the concurrent reads and writes of
       num[] across iterations do not change the final result. */
    #pragma omp parallel for private(j)
    for (i = 2; i <= (int)sqrt((double)n); i++) {
        if (num[i] != 0) {
            for (j = i * i; j <= n; j = j + i) {
                num[j] = 0;
            }
        }
    }

    printf("The prime numbers that are less than %d\n", n);
    #pragma omp parallel for
    for (i = 0; i <= n; i++) {
        if (num[i] != 0)
            printf("\nprimeno=%d threadID=%d\n", num[i], omp_get_thread_num());
    }
    return 0;
}
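/* Build note (assuming a GCC-compatible toolchain): the sieve above needs the
   OpenMP flag and the math library, e.g.
       gcc -fopenmp P014.c -lm -o p014
   The output order varies between runs because the print loop is parallel. */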
convolution.h
/* Copyright (c) 2018 NoobsHPC Authors All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifndef NBHPC_ICESWORD_OPERATOR_X86_CONV_H
#define NBHPC_ICESWORD_OPERATOR_X86_CONV_H

#include "icesword/operator/x86/common.h"

namespace noobshpc {
namespace icesword {

template <ExecuteMethod EType, DataType DType>
class Operator<X86, CONV, EType, DType>
    : public ImplBase<X86, ImplParam<X86, CONV>> {

public:
    typedef typename DataTrait<X86, DType>::Dtype OP_DType;

    // members initialized in declaration order
    Operator()
        : relu_inference(nullptr)
        , thread_num(1)
        , accept_(nullptr)
        , column_(nullptr)
        , bias_(nullptr)
        , weight_(nullptr) {}

    ~Operator() {
        release();
    }

    Status init(const std::vector<Tensor<X86> *>& inputs,
                std::vector<Tensor<X86> *>& outputs,
                ImplParam<X86, CONV>& param) override;

    Status execute(const std::vector<Tensor<X86> *>& inputs,
                   std::vector<Tensor<X86> *>& outputs,
                   ImplParam<X86, CONV>& param) override;

    Status release() override;

private:
    CBLAS_GEMM<X86, DType> gemm;
    Operator<X86, ACT, FWD_REF, DType>* relu_inference;
    bool col_major;
    bool trans_src;
    bool trans_wei;
    bool with_bias;
    bool with_img2col;
    size_t thread_num;
    LayoutType layout;
    char offset_mode;
    std::vector<float> scale;
    void* accept_;
    void* column_;
    void* bias_;
    void* weight_;
    std::vector<OP_DType *> wei_pack_;
    size_t i_c;
    size_t o_c;
    size_t oh_ow;
    size_t ic_ih_iw;
    size_t kh_kw_ic;
    size_t batch;
    size_t group;
    size_t i_h;
    size_t i_w;
    size_t g_ic;
    size_t o_h;
    size_t o_w;
    size_t g_oc;
    size_t k_h;
    size_t k_w;
    size_t s_h;
    size_t s_w;
    size_t d_h;
    size_t d_w;
    size_t p_h;
    size_t p_w;
    std::string rm;
    std::string algo_act;

    Status init_check(const std::vector<Tensor<X86> *>& inputs,
                      std::vector<Tensor<X86> *>& outputs,
                      ImplParam<X86, CONV>& param) override;

    Status init_conf(const std::vector<Tensor<X86> *>& inputs,
                     std::vector<Tensor<X86> *>& outputs,
                     ImplParam<X86, CONV>& param) override;

    Status init_source(const std::vector<Tensor<X86> *>& inputs,
                       std::vector<Tensor<X86> *>& outputs,
                       ImplParam<X86, CONV>& param) override;

    Status img2col(const void *img, void *col);
};

template <ExecuteMethod EType, DataType DType>
Status Operator<X86, CONV, EType, DType>::release() {
    if (accept_) {
        gfree(accept_);
        accept_ = nullptr;
    }
    if (column_) {
        gfree(column_);
        column_ = nullptr;
    }
    if (relu_inference) {
        // allocated with new in init_conf, so it must be deleted, not gfree'd
        delete relu_inference;
        relu_inference = nullptr;
    }
    for (auto & mem : wei_pack_) {
        if (mem) {
            gemm.release(mem);
            mem = nullptr;
        }
    }
    bias_ = nullptr;
    weight_ = nullptr;
    return S_Success;
}

template <ExecuteMethod EType, DataType DType>
Status Operator<X86, CONV, EType, DType>::init(
    const std::vector<Tensor<X86> *>& inputs,
    std::vector<Tensor<X86> *>& outputs,
    ImplParam<X86, CONV>& param) {
    if (init_check(inputs, outputs, param) != S_Success) {
        return S_UnImplError;
    }
    if (init_conf(inputs, outputs, param) != S_Success) {
        return S_UnImplError;
    }
    if (init_source(inputs, outputs, param) != S_Success) {
        return S_UnImplError;
    }
    return S_Success;
};

template <ExecuteMethod EType, DataType DType>
Status Operator<X86, CONV, EType, DType>::init_check(
    const std::vector<Tensor<X86> *>& inputs,
    std::vector<Tensor<X86> *>&
    outputs,
    ImplParam<X86, CONV>& param) {
    if (inputs.size() == 0
        || outputs.size() == 0
        || inputs[0] == nullptr
        || outputs[0] == nullptr
        || inputs[0]->data() == nullptr
        || outputs[0]->data() == nullptr
        || param.get_weight() == nullptr
        || param.get_weight()->data() == nullptr) {
        LOG(ERROR) << "wrong empty pointer !";
        return S_InvalidValue;
    }
    if (DType == DT_INT8) {
        if (inputs[0]->get_scale().size() == 0
            || outputs[0]->get_scale().size() == 0
            || param.get_weight()->get_scale().size() == 0) {
            LOG(ERROR) << "wrong scale size !";
            return S_InvalidValue;
        }
    }
    auto weight_shape = param.get_weight()->shape();
    auto g_oc = weight_shape[0];
    // use param.group here: the member 'group' is only set later, in init_conf
    auto g_ic = weight_shape[3] * param.group;
    auto channel_check = g_ic % param.group + g_oc % param.group;
    if ((param.group > 1) && (channel_check > 0)) {
        LOG(ERROR) << "wrong input or output channel !";
        return S_InvalidValue;
    }
    if (inputs[0]->get_layout() != outputs[0]->get_layout()) {
        LOG(ERROR) << "wrong input or output layout !";
        return S_InvalidValue;
    } else {
        if (inputs[0]->get_layout() != LT_NCHW
            && inputs[0]->get_layout() != LT_NHWC) {
            LOG(ERROR) << "don't support this layout !";
            return S_InvalidValue;
        }
    }
    return S_Success;
}

template <ExecuteMethod EType, DataType DType>
Status Operator<X86, CONV, EType, DType>::init_conf(
    const std::vector<Tensor<X86> *>& inputs,
    std::vector<Tensor<X86> *>& outputs,
    ImplParam<X86, CONV>& param) {
    with_bias = param.get_bias() ? true : false;
    offset_mode = with_bias ? 'C' : 'N';
    bias_ = with_bias ? param.get_bias()->data() : nullptr;
    weight_ = param.get_weight()->data();
    layout = inputs[0]->get_layout();
    group = param.group;
    s_h = param.stride_h;
    s_w = param.stride_w;
    d_h = param.dilation_h;
    d_w = param.dilation_w;
    p_h = param.pad_h;
    p_w = param.pad_w;
    rm = param.rm;
    algo_act = param.act_param.algo_act;

    auto input_shape = inputs[0]->shape();
    auto output_shape = outputs[0]->shape();
    auto weight_shape = param.get_weight()->shape();
    batch = input_shape[0];
    g_oc = weight_shape[0];
    k_h = weight_shape[1];
    k_w = weight_shape[2];
    g_ic = weight_shape[3] * group;

    if (layout == LT_NCHW) {
        col_major = false;
        trans_src = false;
        trans_wei = false;
        i_h = input_shape[2];
        i_w = input_shape[3];
    } else if (layout == LT_NHWC) {
        col_major = true;
        trans_src = true;
        trans_wei = true;
        i_h = input_shape[1];
        i_w = input_shape[2];
    }

    i_c = g_ic / group;
    o_c = g_oc / group;
    // standard output-size formula with the dilated (effective) kernel extent;
    // the original 'k_h / d_h' term only agrees with it for d_h == 1
    o_h = (i_h + 2 * p_h - (d_h * (k_h - 1) + 1)) / s_h + 1;
    o_w = (i_w + 2 * p_w - (d_w * (k_w - 1) + 1)) / s_w + 1;
    oh_ow = o_h * o_w;
    ic_ih_iw = i_c * i_h * i_w;
    kh_kw_ic = k_h * k_w * i_c;
    with_img2col = true;
    // with_img2col = !(o_h == i_h && o_w == i_w &&
    //                  k_h * k_w == 1 && group == 1);

    auto mb_g = batch * group;
    auto omp_max_threads = omp_get_max_threads();
    auto omp_mb_g_threads = mb_g < omp_max_threads ? mb_g : omp_max_threads;
    if (batch != 1) {
        thread_num = omp_mb_g_threads;
    } else {
        thread_num = mb_g > omp_max_threads / 2 ? omp_mb_g_threads : 1;
    }

    if (algo_act == "relu") {
        relu_inference = new Operator<X86, ACT, FWD_REF, DType>;
    }

#ifdef ICESWORD_VERBOSE
    auto io_layout = get_layout_string(layout);
    auto act_type = get_algorithm_string(algo_act);
    LOG(INFO) << "Convolution x86 verbose{"
              << " layout:" << io_layout
              << " act:" << act_type
              << " bias:" << (with_bias ?
"true" : "false") << " batch:" << batch << " group:" << group << " ic:" << i_c << " oc:" << o_c << " ih:" << i_h << " iw:" << i_w << " oh:" << o_h << " ow:" << o_w << " kh:" << k_h << " kw:" << k_w << " ph:" << p_h << " pw:" << p_w << " sh:" << s_h << " sw:" << s_w << " dh:" << d_h << " dw:" << d_w << " }"; #endif return S_Success; } template <ExecuteMethod EType, DataType DType> Status Operator<X86, CONV, EType, DType>::init_source( const std::vector<Tensor<X86> *>& inputs, std::vector<Tensor<X86> *>& outputs, ImplParam<X86, CONV>& param) { if (EType == FWD_REF) { return S_Success; } // LOG(INFO) << thread_num << ' ' << kh_kw_ic << ' ' << oh_ow; column_ = gcalloc(thread_num * kh_kw_ic * oh_ow, sizeof(OP_DType)); CHECK_EQ((column_ != nullptr), true) << "calloc memory failed !"; if (o_c != 1 && oh_ow != 1) { for (auto g = 0; g < group; g++) { auto wei_ = static_cast<OP_DType *>(weight_) + g * o_c * kh_kw_ic; wei_pack_.push_back(static_cast<OP_DType *>(gemm.pack(wei_, // ptr false, // col_major true, // packed_a false, // need_trans o_c, // M oh_ow, // N kh_kw_ic, // K 1.f))); // alpha } } if (DType == DT_FLOAT) { } else { accept_ = gcalloc(o_c * oh_ow, sizeof(int32_t)); CHECK_EQ((accept_ != nullptr), true) << "calloc memory failed !"; for (auto i = 0; i < o_c; i ++) { scale.push_back((inputs[0]->get_scale()[0] * param.get_weight()->get_scale()[i]) / outputs[0]->get_scale()[0]); } } return S_Success; } template <ExecuteMethod EType, DataType DType> Status Operator<X86, CONV, EType, DType>::img2col(const void *img, void *col) { CHECK_EQ((img != nullptr), true) << "wrong empty pointer !"; CHECK_EQ((col != nullptr), true) << "wrong empty pointer !"; if (layout = LT_NCHW) { if (DType == DT_FLOAT) { auto src = static_cast<const float *>(img); auto dst = static_cast<float *>(col); #pragma omp parallel for collapse(4) num_threads(thread_num) for (auto ic = 0; ic < i_c; ++ic) { for (auto oh = 0; oh < o_h; ++oh) { for (auto ow = 0; ow < o_w; ++ow) { for (auto kh = 0; kh < k_h; ++kh) { auto ih = oh * s_h - p_h + kh * d_h; if (ih < 0 || ih >= i_h) continue; #pragma omp simd for (auto kw = 0; kw < k_w; ++kw) { auto iw = ow * s_w - p_w + kw * d_w; if (iw < 0 || iw >= i_w) continue; int iidx = (ic * i_h + ih) * i_w + iw; int didx = (((kh * k_w + kw) * i_c + ic) * o_h + oh) * o_w + ow; dst[didx] = src[iidx]; } } } } } } } else { // LT_NHWC } return S_Success; } } // namespace icesword } // namespace noobshpc #endif // NBHPC_ICESWORD_OPERATOR_X86_CONV_H
2001.c
// this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/heat-3d/kernel.c' as parsed by frontend compiler rose void kernel_heat_3d(int tsteps, int n, double A[120 + 0][120 + 0][120 + 0], double B[120 + 0][120 + 0][120 + 0]) { int t14; int t12; int t10; int t8; int t6; int t4; int t2; for (t2 = 1; t2 <= 500; t2 += 1) { #pragma omp parallel for private(t4,t8,t10,t12,t14) for (t4 = 1; t4 <= n - 2; t4 += 32) for (t6 = t4; t6 <= (t4 + 31 < n - 2 ? t4 + 31 : n - 2); t6 += 1) for (t8 = 1; t8 <= n - 2; t8 += 16) for (t10 = t8; t10 <= (n - 2 < t8 + 15 ? n - 2 : t8 + 15); t10 += 1) for (t12 = 1; t12 <= n - 2; t12 += 16) for (t14 = t12; t14 <= (n - 2 < t12 + 15 ? n - 2 : t12 + 15); t14 += 1) B[t6][t10][t14] = 0.125 * (A[t6 + 1][t10][t14] - 2 * A[t6][t10][t14] + A[t6 - 1][t10][t14]) + 0.125 * (A[t6][t10 + 1][t14] - 2 * A[t6][t10][t14] + A[t6][t10 - 1][t14]) + 0.125 * (A[t6][t10][t14 + 1] - 2 * A[t6][t10][t14] + A[t6][t10][t14 - 1]) + A[t6][t10][t14]; #pragma omp parallel for private(t4,t8,t10,t12,t14) for (t4 = 1; t4 <= n - 2; t4 += 32) for (t6 = t4; t6 <= (t4 + 31 < n - 2 ? t4 + 31 : n - 2); t6 += 1) for (t8 = 1; t8 <= n - 2; t8 += 16) for (t10 = t8; t10 <= (n - 2 < t8 + 15 ? n - 2 : t8 + 15); t10 += 1) for (t12 = 1; t12 <= n - 2; t12 += 16) for (t14 = t12; t14 <= (n - 2 < t12 + 15 ? n - 2 : t12 + 15); t14 += 1) A[t6][t10][t14] = 0.125 * (B[t6 + 1][t10][t14] - 2 * B[t6][t10][t14] + B[t6 - 1][t10][t14]) + 0.125 * (B[t6][t10 + 1][t14] - 2 * B[t6][t10][t14] + B[t6][t10 - 1][t14]) + 0.125 * (B[t6][t10][t14 + 1] - 2 * B[t6][t10][t14] + B[t6][t10][t14 - 1]) + B[t6][t10][t14]; } }
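/* Hypothetical driver (not part of the generated file): exercising
   kernel_heat_3d with heap-allocated grids, since 2 x 120^3 doubles (~28 MB)
   exceed typical stack limits. Note the generated kernel hard-codes 500 time
   steps regardless of the tsteps argument. */
#include <stdlib.h>

int heat3d_demo(void)
{
    typedef double grid_t[120][120][120];
    grid_t *A = malloc(sizeof(grid_t));
    grid_t *B = malloc(sizeof(grid_t));
    if (!A || !B) { free(A); free(B); return 1; }

    for (int i = 0; i < 120; i++)
        for (int j = 0; j < 120; j++)
            for (int k = 0; k < 120; k++)
                (*A)[i][j][k] = (*B)[i][j][k] = (double)(i + j + k) / 120.0;

    kernel_heat_3d(500, 120, *A, *B);

    free(A);
    free(B);
    return 0;
}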
GB_unop__ceil_fc32_fc32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__ceil_fc32_fc32 // op(A') function: GB_unop_tran__ceil_fc32_fc32 // C type: GxB_FC32_t // A type: GxB_FC32_t // cast: GxB_FC32_t cij = aij // unaryop: cij = GB_cceilf (aij) #define GB_ATYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_cceilf (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC32_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC32_t z = aij ; \ Cx [pC] = GB_cceilf (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_CEIL || GxB_NO_FC32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__ceil_fc32_fc32 ( GxB_FC32_t *Cx, // Cx and Ax may be aliased const GxB_FC32_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = GB_cceilf (z) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__ceil_fc32_fc32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
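/* Usage sketch (hypothetical; inside GraphBLAS this generated kernel is
   normally reached via GB_apply, not called directly). Assumes GxB_FC32_t is
   the single-precision complex type and GxB_CMPLXF its constructor macro from
   GraphBLAS.h; GB_cceilf applies ceilf to the real and imaginary parts. */
static void ceil_fc32_demo(void)
{
    GxB_FC32_t a[2] = { GxB_CMPLXF(0.2f, 1.7f), GxB_CMPLXF(-0.5f, 3.1f) };
    GxB_FC32_t c[2];
    GrB_Info info = GB_unop_apply__ceil_fc32_fc32(c, a, 2, 1);
    /* when the kernel is enabled (info == GrB_SUCCESS):
       c[0] == 1 + 2i and c[1] == -0 + 4i */
    (void) info;
}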
eigs.c
/* Copyright © INRIA 2010-2011.
   Authors: Matthijs Douze & Herve Jegou
   Contact: matthijs.douze@inria.fr  herve.jegou@inria.fr

   This software is a computer program whose purpose is to provide
   efficient tools for basic yet computationally demanding tasks,
   such as find k-nearest neighbors using exhaustive search and
   kmeans clustering.

   This software is governed by the CeCILL license under French law
   and abiding by the rules of distribution of free software.  You
   can use, modify and/or redistribute the software under the terms
   of the CeCILL license as circulated by CEA, CNRS and INRIA at the
   following URL "http://www.cecill.info".

   As a counterpart to the access to the source code and rights to
   copy, modify and redistribute granted by the license, users are
   provided only with a limited warranty and the software's author,
   the holder of the economic rights, and the successive licensors
   have only limited liability.

   In this respect, the user's attention is drawn to the risks
   associated with loading, using, modifying and/or developing or
   reproducing the software by the user in light of its specific
   status of free software, that may mean that it is complicated to
   manipulate, and that also therefore means that it is reserved for
   developers and experienced professionals having in-depth computer
   knowledge. Users are therefore encouraged to load and test the
   software's suitability as regards their requirements in conditions
   enabling the security of their systems and/or data to be ensured
   and, more generally, to use and operate it in the same conditions
   as regards security.

   The fact that you are presently reading this means that you have
   had knowledge of the CeCILL license and that you accept its terms.
*/

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <assert.h>

#include "eigs.h"
#include "vector.h"
#include "sorting.h"
#include "machinedeps.h"

extern void dsyev_ (char *jobz, char *uplo, FINTEGER *n, double *a,
                    FINTEGER *lda, double *w, double *work,
                    FINTEGER *lwork, FINTEGER *info);

extern void dsygv_ (FINTEGER *itype, char *jobz, char *uplo, FINTEGER *n,
                    double *a, FINTEGER *lda, double *b, FINTEGER *ldb,
                    double *w, double *work, FINTEGER *lwork,
                    FINTEGER *info);

typedef float real;

extern void sgemv_ (const char *trans, FINTEGER *m, FINTEGER *n,
                    real *alpha, const real *a, FINTEGER *lda,
                    const real *x, FINTEGER *incx, real *beta,
                    real *y, FINTEGER *incy);


int eigs_sym (int di, const float * m, float * eigval, float * eigvec)
{
  int i, j;
  FINTEGER d = di;
  double * md = (double *) memalign (16, sizeof (*md) * d * d);

  /* processing is performed in double precision */
  for (i = 0 ; i < d ; i++)
    for (j = 0 ; j < d ; j++)
      md[i * d + j] = (double) m[i * d + j];

  /* variables for the lapack function */
  double workopt = 0;
  FINTEGER lwork = -1, info;

  double * lambda = (double *) memalign (16, sizeof (*lambda) * d);

  /* first call: workspace query (lwork = -1) */
  dsyev_ ("V", "L", &d, md, &d, lambda, &workopt, &lwork, &info);

  lwork = (int) workopt;
  double * work = (double *) memalign (16, lwork * sizeof (*work));

  /* second call: actual eigendecomposition */
  dsyev_ ("V", "L", &d, md, &d, lambda, work, &lwork, &info);

  if (info != 0) {
    fprintf (stderr, "# eigs_sym: problem while computing "
             "eigen-vectors/values info=%ld\n", (long) info);
    goto error;
  }

  /* the eigenvectors returned by lapack are already unit-norm (nr = 1):
     copy them and the eigenvalues back to single precision */
  double nr = 1;
  for (i = 0 ; i < d ; i++) {
    if (eigval)
      eigval[i] = (float) lambda[i];
    if (eigvec)
      for (j = 0 ; j < d ; j++)
        eigvec[i * d + j] = (float) (md[i * d + j] / nr);
  }

 error:
  free (md);
  free (lambda);
  free (work);
  return info;
}


int geigs_sym (int di, const float * a, const float * b,
               float * eigval, float * eigvec)
{
  int i, j;
  FINTEGER d = di;
  double * ad = (double *) memalign (16, sizeof (*ad) * d * d);
  double * bd = (double *) memalign (16, sizeof (*bd) * d * d);

  /* processing is performed in double precision */
  for (i = 0 ; i < d ; i++)
    for (j = 0 ; j < d ; j++) {
      ad[i * d + j] = (double) a[i * d + j];
      bd[i * d + j] = (double) b[i * d + j];
    }

  /* variables for the lapack function */
  double workopt = 0;
  FINTEGER lwork = -1, info, itype = 1;

  double * lambda = (double *) memalign (16, sizeof (*lambda) * d);

  /* first call: workspace query (lwork = -1) */
  dsygv_ (&itype, "V", "L", &d, ad, &d, bd, &d, lambda, &workopt,
          &lwork, &info);

  lwork = (int) workopt;
  double * work = (double *) memalign (16, lwork * sizeof (*work));

  /* second call: actual generalized eigendecomposition */
  dsygv_ (&itype, "V", "L", &d, ad, &d, bd, &d, lambda, work,
          &lwork, &info);

  if (info != 0) {
    fprintf (stderr, "# geigs_sym: problem while computing "
             "eigen-vectors/values info=%ld\n", (long) info);
    goto error;
  }

  /* the eigenvectors are already normalized (nr = 1): copy and free */
  double nr = 1;
  for (i = 0 ; i < d ; i++) {
    if (eigval)
      eigval[i] = (float) lambda[i];
    if (eigvec)
      for (j = 0 ; j < d ; j++)
        eigvec[i * d + j] = (float) (ad[i * d + j] / nr);
  }

 error:
  free (ad);
  free (bd);
  free (lambda);
  free (work);
  return info;
}


/* re-order the eigenvalues (and their eigenvectors accordingly):
   ascending if criterion == 0, descending otherwise */
void eigs_reorder (int d, float * eigval, float * eigvec, int criterion)
{
  int i;
  int * perm = ivec_new (d);
  float * eigvalst = fvec_new (d);
  float * eigvecst = fvec_new (d * d);

  fvec_sort_index (eigval, d, perm);

  if (criterion)   /* reverse the permutation -> descending order */
    for (i = 0 ; i < d / 2 ; i++) {
      int tmp = perm[i];
      perm[i] = perm[d - 1 - i];
      perm[d - 1 - i] = tmp;
    }

  for (i = 0 ; i < d ; i++) {
    eigvalst[i] = eigval[perm[i]];
    memcpy (eigvecst + i * d, eigvec + perm[i] * d,
            sizeof (*eigvecst) * d);
  }

  memcpy (eigval, eigvalst, d * sizeof (*eigval));
  memcpy (eigvec, eigvecst, d * d * sizeof (*eigvec));

  free (eigvalst);
  free (eigvecst);
  free (perm);
}


int eigs_sym_part (int ni, const float * a, int nev,
                   float * sout, float * vout)
{
  FINTEGER n = ni;
  arpack_eigs_t *ae = arpack_eigs_begin (n, nev);
  int ret = 0;

  for (;;) {
    float *x, *y;
    ret = arpack_eigs_step (ae, &x, &y);

    if (ret < 0) break;      /* error */
    if (ret == 0) break;     /* stop iteration */

    /* ret == 1: apply the operator, y := a * x */
    float zero = 0, one = 1;
    FINTEGER ione = 1;
#ifndef _OPENMP
    sgemv_ ("Trans", &n, &n, &one, a, &n, x, &ione, &zero, y, &ione);
#else
    /* split the output rows of the matrix-vector product over the cores */
    int c, nt = count_cpu ();
#pragma omp parallel for
    for (c = 0; c < nt; c++) {
      int i0 = n * c / nt;
      int i1 = n * (c + 1) / nt;
      FINTEGER id = i1 - i0;
      sgemv_ ("Trans", &n, &id, &one, a + i0 * n, &n, x, &ione,
              &zero, y + i0, &ione);
    }
#endif
  }

  ret = arpack_eigs_end (ae, sout, vout);
  return ret;
}


#ifdef HAVE_ARPACK

typedef FINTEGER integer;
typedef FINTEGER logical;

extern void ssaupd_ (integer *ido, const char *bmat, integer *n,
                     const char *which, integer *nev, float *tol,
                     float *resid, integer *ncv, float *v, integer *ldv,
                     integer *iparam, integer *ipntr, float *workd,
                     float *workl, integer *lworkl, integer *info);

extern void sseupd_ (logical *rvec, const char *howmny, logical *select,
                     float *d, float *z, integer *ldz, float *sigma,
                     const char *bmat, integer *n, const char *which,
                     integer *nev, float *tol, float *resid, integer *ncv,
                     float *v, integer *ldv, integer *iparam,
                     integer *ipntr, float *workd, float *workl,
                     integer *lworkl, integer *info);

struct arpack_eigs_t {
  FINTEGER n, nev;
  FINTEGER ncv;
  FINTEGER ido, info;
  FINTEGER lworkl;
  float *resid, *workd, *workl;
  float *v;
  FINTEGER *iparam, *ipntr;
  logical *select;
};

#define NEWA(type,n)  (type*)malloc(sizeof(type)*(n))
#define NEWAC(type,n) (type*)calloc((n),sizeof(type))

arpack_eigs_t *arpack_eigs_begin (int n, int nev)
{
  arpack_eigs_t *ae = NEWAC (arpack_eigs_t, 1);
  if (!ae) goto mem_error;

  ae->n = n;
  ae->nev = nev;
  int ncv = nev * 2;  /* should be enough (see remark 4 of ssaupd doc) */
  ae->ncv = ncv;
  /* printf("nev = %d ncv = %d\n", (int)nev, (int)ncv); */
  ae->lworkl = ncv * (long) (ncv + 8);

  ae->resid = NEWA (float, n);
  ae->workd = NEWA (float, 3 * n);
  ae->workl = NEWA (float, ae->lworkl);
  ae->v = NEWA (float, n * (long) ncv);
  FINTEGER *iparam = ae->iparam = NEWA (FINTEGER, 11);
  ae->ipntr = NEWA (FINTEGER, 11);

  if (!(ae->resid && ae->workd && ae->workl && ae->v &&
        ae->iparam && ae->ipntr))
    goto mem_error;

  ae->info = 0;  /* use random initial vector */
  ae->ido = 0;

  iparam[0] = 1;
  iparam[2] = n;
  iparam[6] = 1;

  return ae;

 mem_error:
  fprintf (stderr, "Yael arpack_eigs_begin: out of memory\n");
  if (ae) {
    free (ae->resid);
    free (ae->workd);
    free (ae->workl);
    free (ae->v);
    free (ae->iparam);
    free (ae->ipntr);
  }
  free (ae);
  return NULL;
}

int arpack_eigs_step (arpack_eigs_t *ae, float **x, float **y)
{
  const char *bmat = "I", *which = "LM";
  float tol = 0;

  ssaupd_ (&ae->ido, bmat, &ae->n, which, &ae->nev,
           &tol, ae->resid, &ae->ncv,
           ae->v, &ae->n,
           ae->iparam, ae->ipntr,
           ae->workd, ae->workl, &ae->lworkl,
           &ae->info);

  /* printf("arpack_eigs_step: ido = %d info = %d\n",
            (int)ae->ido, (int)ae->info); */

  if (ae->ido == -1 || ae->ido == 1) {
    /* reverse communication: the caller must compute y := op(x) */
    *x = ae->workd + ae->ipntr[0] - 1;
    *y = ae->workd + ae->ipntr[1] - 1;
    return 1;
  }

  *x = *y = NULL;

  if (ae->info < 0) {
    fprintf (stderr, "arpack_eigs_step: ssaupd_ error info=%ld\n",
             (long) ae->info);
    return ae->info;
  }

  return 0;
}

int arpack_eigs_end (arpack_eigs_t *ae, float * sout, float * vout)
{
  int i, ret = 0;
  FINTEGER n = ae->n, nev = ae->nev, ncv = ae->ncv;
  int nconv = 0;

  logical *select = NEWA (logical, ncv);
  float *s = NEWA (float, ncv * 2);
  int *perm = NEWA (int, nev);

  if (!(select && s && perm)) {
    fprintf (stderr, "Yael arpack_eigs_end: out of memory\n");
    ret = -100;
    goto error;
  }

  if (ae->info < 0) {
    ret = ae->info;
    goto error;
  }

  {
    FINTEGER ierr;
    logical rvec = 1;
    float sigma;
    const char *bmat = "I", *which = "LM";
    float tol = 0;

    sseupd_ (&rvec, "All", select, s, ae->v, &n, &sigma,
             bmat, &n, which, &nev,
             &tol, ae->resid, &ncv,
             ae->v, &n,
             ae->iparam, ae->ipntr,
             ae->workd, ae->workl, &ae->lworkl,
             &ierr);

    if (ierr != 0) {
      fprintf (stderr, "arpack_eigs_end: sseupd_ error: %ld\n",
               (long) ierr);
      ret = ierr;
      goto error;
    }

    ret = nconv = ae->iparam[4];
    assert (nconv <= nev);
  }

  /* order v by s, largest eigenvalue first */
  fvec_sort_index (s, nconv, perm);

  if (vout)
    for (i = 0; i < nconv; i++)
      memcpy (vout + n * (long) i,
              ae->v + n * (long) (nconv - 1 - perm[i]),
              sizeof (float) * n);

  if (sout)
    for (i = 0; i < nconv; i++)
      sout[i] = s[nconv - 1 - perm[i]];

 error:
  free (select);
  free (perm);
  free (s);
  free (ae->resid);
  free (ae->workl);
  free (ae->workd);
  free (ae->iparam);
  free (ae->ipntr);
  free (ae->v);
  free (ae);
  return ret;
}

#else

arpack_eigs_t *arpack_eigs_begin (int n, int nev)
{
  fprintf (stderr, "Error: Yael not compiled with Arpack!");
  abort ();
}

int arpack_eigs_step (arpack_eigs_t *ae, float **x, float **y)
{
  fprintf (stderr, "Error: Yael not compiled with Arpack!");
  abort ();
}

int arpack_eigs_end (arpack_eigs_t *ae, float * sout, float * vout)
{
  fprintf (stderr, "Error: Yael not compiled with Arpack!");
  abort ();
}

#endif
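/* A minimal usage sketch for the dense symmetric eigensolver above
 * (illustrative values; assumes the Yael headers and a LAPACK/BLAS
 * library are available at link time).  dsyev_ returns eigenvalues in
 * ascending order, so eigs_reorder with criterion = 1 flips them to
 * descending. */
#include <stdio.h>
#include "eigs.h"

int demo_eigs (void)
{
  /* 2x2 symmetric matrix [[2,1],[1,2]]: eigenvalues are 1 and 3 */
  float m[4] = { 2, 1, 1, 2 };
  float eigval[2], eigvec[4];

  if (eigs_sym (2, m, eigval, eigvec) != 0)
    return -1;                             /* lapack reported a problem */

  eigs_reorder (2, eigval, eigvec, 1);     /* largest eigenvalue first */
  printf ("lambda = %g %g\n", eigval[0], eigval[1]);   /* expect 3 1 */
  return 0;
}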
mtlinpack.c
/*
  Translated to C by Bonnie Toy 5/88

  - modified on 2/25/94 to fix a problem with daxpy for unequal
    increments or equal increments not equal to 1.
      Jack Dongarra
  - modified on 08/27/09: fix typo line 270, plus set 'ix' to 0 in the
    case incx is not 1.
      Julie Langou

  To compile single precision version for Sun-4:
    cc -DSP -O4 -fopenmp -fsingle -fsingle2 clinpack.c -lm
  To compile double precision version for Sun-4:
    cc -DDP -O4 clinpack.c -lm

  To obtain rolled source BLAS, add -DROLL to the command lines.
  To obtain unrolled source BLAS, add -DUNROLL to the command lines.

  You must specify one of -DSP or -DDP to compile correctly.
  You must specify one of -DROLL or -DUNROLL to compile correctly.

  OUTPUTFILE is passed as the first argument (optional)
  NTIMES is passed as the second argument (optional)

  Valid command lines:
    ./linpack
    ./linpack out.txt
    ./linpack out.txt 100
*/

#ifdef SP
#define REAL float
#define ZERO 0.0
#define ONE 1.0
#define PREC "Single "
#endif

#ifdef DP
#define REAL double
#define ZERO 0.0e0
#define ONE 1.0e0
#define PREC "Double "
#endif

#define NTIMES 10000
#define DIM 200
#define HDIM 200
#define DIMP 201

#ifdef ROLL
#define ROLLING "Rolled "
#endif
#ifdef UNROLL
#define ROLLING "Unrolled "
#endif

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <omp.h>

static REAL times[9][9];

int main (int argc, char *argv[])
{
    static REAL aa[DIM][DIM],a[DIM][DIMP],b[DIM],x[DIM];
    REAL cray,ops,total,norma,normx;
    REAL resid,residn,eps,t1,tm,tm2;
    REAL epslon(),second(),kf;
    static int ipvt[DIM],n,i,ntimes,info,lda,ldaa,kflops;
    FILE *outfile;
    time_t mytime;

    lda = DIMP;
    ldaa = DIM;
    cray = .056;
    n = HDIM;

    if (argc == 3) ntimes = atoi(argv[2]);
    else ntimes = NTIMES;

    if (argc >= 2) outfile = fopen(argv[1],"a");
    else outfile = stdout;

    fprintf(outfile,ROLLING);
    fprintf(outfile,PREC);
    fprintf(outfile,"Precision Linpack (%d)\n\n", ntimes);

    mytime = time(NULL);
    fprintf(outfile,"%s\n",ctime(&mytime));

    ops = (2.0e0*(n*n*n))/3.0 + 2.0*(n*n);

    matgen(a,lda,n,b,&norma);
    t1 = second();
    dgefa(a,lda,n,ipvt,&info);
    times[0][0] = second() - t1;
    t1 = second();
    dgesl(a,lda,n,ipvt,b,0);
    times[1][0] = second() - t1;
    total = times[0][0] + times[1][0];

    /* compute a residual to verify results. */

    for (i = 0; i < n; i++) {
        x[i] = b[i];
    }
    matgen(a,lda,n,b,&norma);
    for (i = 0; i < n; i++) {
        b[i] = -b[i];
    }
    dmxpy(n,b,n,lda,x,a);
    resid = 0.0;
    normx = 0.0;
    for (i = 0; i < n; i++) {
        resid = (resid > fabs((double)b[i])) ? resid : fabs((double)b[i]);
        normx = (normx > fabs((double)x[i])) ? normx : fabs((double)x[i]);
    }
    eps = epslon((REAL)ONE);
    residn = resid/( n*norma*normx*eps );

    printf("     norm. resid      resid           machep");
    printf("         x[0]-1        x[n-1]-1\n");
    printf("  %8.1f      %16.8e%16.8e%16.8e%16.8e\n",
           (double)residn, (double)resid, (double)eps,
           (double)x[0]-1, (double)x[n-1]-1);

    fprintf(outfile,"    times are reported for matrices of order %5d\n",n);
    fprintf(outfile,"      dgefa      dgesl      total     kflops     unit");
    fprintf(outfile,"      ratio\n");

    times[2][0] = total;
    times[3][0] = ops/(1.0e3*total);
    times[4][0] = 2.0e3/times[3][0];
    times[5][0] = total/cray;

    fprintf(outfile," times for array with leading dimension of%5d\n",lda);
    print_time(0, outfile);

    matgen(a,lda,n,b,&norma);
    t1 = second();
    dgefa(a,lda,n,ipvt,&info);
    times[0][1] = second() - t1;
    t1 = second();
    dgesl(a,lda,n,ipvt,b,0);
    times[1][1] = second() - t1;
    total = times[0][1] + times[1][1];
    times[2][1] = total;
    times[3][1] = ops/(1.0e3*total);
    times[4][1] = 2.0e3/times[3][1];
    times[5][1] = total/cray;

    matgen(a,lda,n,b,&norma);
    t1 = second();
    dgefa(a,lda,n,ipvt,&info);
    times[0][2] = second() - t1;
    t1 = second();
    dgesl(a,lda,n,ipvt,b,0);
    times[1][2] = second() - t1;
    total = times[0][2] + times[1][2];
    times[2][2] = total;
    times[3][2] = ops/(1.0e3*total);
    times[4][2] = 2.0e3/times[3][2];
    times[5][2] = total/cray;

    tm2 = 0.0;
    t1 = second();
    for (i = 0; i < ntimes; i++) {
        tm = second();
        matgen(a,lda,n,b,&norma);
        tm2 = tm2 + second() - tm;
        dgefa(a,lda,n,ipvt,&info);
    }
    times[0][3] = (second() - t1 - tm2)/ntimes;
    t1 = second();
    for (i = 0; i < ntimes; i++) {
        dgesl(a,lda,n,ipvt,b,0);
    }
    times[1][3] = (second() - t1)/ntimes;
    total = times[0][3] + times[1][3];
    times[2][3] = total;
    times[3][3] = ops/(1.0e3*total);
    times[4][3] = 2.0e3/times[3][3];
    times[5][3] = total/cray;

    print_time(1, outfile);
    print_time(2, outfile);
    print_time(3, outfile);

    matgen(aa,ldaa,n,b,&norma);
    t1 = second();
    dgefa(aa,ldaa,n,ipvt,&info);
    times[0][4] = second() - t1;
    t1 = second();
    dgesl(aa,ldaa,n,ipvt,b,0);
    times[1][4] = second() - t1;
    total = times[0][4] + times[1][4];
    times[2][4] = total;
    times[3][4] = ops/(1.0e3*total);
    times[4][4] = 2.0e3/times[3][4];
    times[5][4] = total/cray;

    matgen(aa,ldaa,n,b,&norma);
    t1 = second();
    dgefa(aa,ldaa,n,ipvt,&info);
    times[0][5] = second() - t1;
    t1 = second();
    dgesl(aa,ldaa,n,ipvt,b,0);
    times[1][5] = second() - t1;
    total = times[0][5] + times[1][5];
    times[2][5] = total;
    times[3][5] = ops/(1.0e3*total);
    times[4][5] = 2.0e3/times[3][5];
    times[5][5] = total/cray;

    matgen(aa,ldaa,n,b,&norma);
    t1 = second();
    dgefa(aa,ldaa,n,ipvt,&info);
    times[0][6] = second() - t1;
    t1 = second();
    dgesl(aa,ldaa,n,ipvt,b,0);
    times[1][6] = second() - t1;
    total = times[0][6] + times[1][6];
    times[2][6] = total;
    times[3][6] = ops/(1.0e3*total);
    times[4][6] = 2.0e3/times[3][6];
    times[5][6] = total/cray;

    tm2 = 0;
    t1 = second();
    for (i = 0; i < ntimes; i++) {
        tm = second();
        matgen(aa,ldaa,n,b,&norma);
        tm2 = tm2 + second() - tm;
        dgefa(aa,ldaa,n,ipvt,&info);
    }
    times[0][7] = (second() - t1 - tm2)/ntimes;
    t1 = second();
    for (i = 0; i < ntimes; i++) {
        dgesl(aa,ldaa,n,ipvt,b,0);
    }
    times[1][7] = (second() - t1)/ntimes;
    total = times[0][7] + times[1][7];
    times[2][7] = total;
    times[3][7] = ops/(1.0e3*total);
    times[4][7] = 2.0e3/times[3][7];
    times[5][7] = total/cray;

    /* the following code sequence implements the semantics of
       the Fortran intrinsics "nint(min(time[3][3],time[3][7]))" */

    kf = (times[3][3] < times[3][7]) ? times[3][3] : times[3][7];
    kf = (kf > ZERO) ? (kf + .5) : (kf - .5);
    if (fabs((double)kf) < ONE)
        kflops = 0;
    else {
        kflops = floor(fabs((double)kf));
        if (kf < ZERO) kflops = -kflops;
    }

    fprintf(outfile," times for array with leading dimension of%4d\n",ldaa);
    print_time(4, outfile);
    print_time(5, outfile);
    print_time(6, outfile);
    print_time(7, outfile);

    fprintf(outfile,ROLLING);
    fprintf(outfile,PREC);
    fprintf(outfile," Precision %5d Kflops ; %d Reps \n",kflops,ntimes);

    if (argc >= 2) fclose(outfile);
    return 0;
}

/*----------------------*/
print_time (row, outfile)
int row;
FILE *outfile;
{
    fprintf(outfile,"%11.2f%11.2f%11.2f%11.0f%11.2f%11.2f\n",
            (double)times[0][row], (double)times[1][row],
            (double)times[2][row], (double)times[3][row],
            (double)times[4][row], (double)times[5][row]);
}

/*----------------------*/
matgen(a,lda,n,b,norma)
REAL a[],b[],*norma;
int lda, n;

/* We would like to declare a[][lda], but c does not allow it.  In this
   function, references to a[i][j] are written a[lda*j+i]. */

{
    int init, i, j;

    init = 1325;
    *norma = 0.0;
    /* 'init' is firstprivate so that every thread starts the generator
       from the seed for its own columns (a merely private copy would be
       uninitialized); the run is deterministic, though the matrix differs
       from the one the serial generator produces.  The repeated test
       around the critical section keeps the max update cheap. */
#pragma omp parallel for private(i) firstprivate(init)
    for (j = 0; j < n; j++) {
        for (i = 0; i < n; i++) {
            init = 3125*init % 65536;
            a[lda*j+i] = (init - 32768.0)/16384.0;
            if (a[lda*j+i] > *norma)
#pragma omp critical
                if (a[lda*j+i] > *norma)
                    *norma = a[lda*j+i];
        }
    }
    for (i = 0; i < n; i++) {
        b[i] = 0.0;
    }
    for (j = 0; j < n; j++) {
        for (i = 0; i < n; i++) {
            b[i] = b[i] + a[lda*j+i];
        }
    }
}

/*----------------------*/
dgefa(a,lda,n,ipvt,info)
REAL a[];
int lda,n,ipvt[],*info;

/* We would like to declare a[][lda], but c does not allow it.  In this
   function, references to a[i][j] are written a[lda*i+j]. */

/*
   dgefa factors a double precision matrix by gaussian elimination.

   dgefa is usually called by dgeco, but it can be called directly
   with a saving in time if rcond is not needed.
   (time for dgeco) = (1 + 9/n)*(time for dgefa) .

   on entry

     a      REAL precision[n][lda]
            the matrix to be factored.

     lda    integer
            the leading dimension of the array a .

     n      integer
            the order of the matrix a .

   on return

     a      an upper triangular matrix and the multipliers which were
            used to obtain it.  the factorization can be written
            a = l*u where l is a product of permutation and unit lower
            triangular matrices and u is upper triangular.

     ipvt   integer[n]
            an integer vector of pivot indices.

     info   integer
            = 0  normal value.
            = k  if u[k][k] .eq. 0.0 .  this is not an error condition
                 for this subroutine, but it does indicate that dgesl
                 or dgedi will divide by zero if called.  use rcond in
                 dgeco for a reliable indication of singularity.

   linpack. this version dated 08/14/78 .
   cleve moler, university of new mexico, argonne national lab.

   functions

   blas daxpy,dscal,idamax
*/

{
    /* internal variables */
    REAL t;
    int idamax(),j,k,kp1,l,nm1;

    /* gaussian elimination with partial pivoting */

    *info = 0;
    nm1 = n - 1;
    if (nm1 >= 0) {
        for (k = 0; k < nm1; k++) {
            kp1 = k + 1;

            /* find l = pivot index */

            l = idamax(n-k,&a[lda*k+k],1) + k;
            ipvt[k] = l;

            /* zero pivot implies this column already triangularized */

            if (a[lda*k+l] != ZERO) {

                /* interchange if necessary */

                if (l != k) {
                    t = a[lda*k+l];
                    a[lda*k+l] = a[lda*k+k];
                    a[lda*k+k] = t;
                }

                /* compute multipliers */

                t = -ONE/a[lda*k+k];
                dscal(n-(k+1),t,&a[lda*k+k+1],1);

                /* row elimination with column indexing;
                   the column updates are independent of each other */

#pragma omp parallel for private(t)
                for (j = kp1; j < n; j++) {
                    t = a[lda*j+l];
                    if (l != k) {
                        a[lda*j+l] = a[lda*j+k];
                        a[lda*j+k] = t;
                    }
                    daxpy(n-(k+1),t,&a[lda*k+k+1],1,&a[lda*j+k+1],1);
                }
            }
            else {
                *info = k;
            }
        }
    }
    ipvt[n-1] = n-1;
    if (a[lda*(n-1)+(n-1)] == ZERO) *info = n-1;
}

/*----------------------*/
dgesl(a,lda,n,ipvt,b,job)
int lda,n,ipvt[],job;
REAL a[],b[];

/* We would like to declare a[][lda], but c does not allow it.  In this
   function, references to a[i][j] are written a[lda*i+j]. */

/*
   dgesl solves the double precision system
   a * x = b  or  trans(a) * x = b
   using the factors computed by dgeco or dgefa.

   on entry

     a      double precision[n][lda]
            the output from dgeco or dgefa.

     lda    integer
            the leading dimension of the array a .

     n      integer
            the order of the matrix a .

     ipvt   integer[n]
            the pivot vector from dgeco or dgefa.

     b      double precision[n]
            the right hand side vector.

     job    integer
            = 0        to solve a*x = b ,
            = nonzero  to solve trans(a)*x = b where trans(a) is the
                       transpose.

   on return

     b      the solution vector x .

   error condition

     a division by zero will occur if the input factor contains a zero
     on the diagonal.  technically this indicates singularity but it is
     often caused by improper arguments or improper setting of lda .
     it will not occur if the subroutines are called correctly and if
     dgeco has set rcond .gt. 0.0 or dgefa has set info .eq. 0 .

   to compute inverse(a) * c where c is a matrix with p columns
     dgeco(a,lda,n,ipvt,rcond,z)
     if (!rcond is too small){
         for (j=0,j<p,j++)
             dgesl(a,lda,n,ipvt,c[j][0],0);
     }

   linpack. this version dated 08/14/78 .
   cleve moler, university of new mexico, argonne national lab.

   functions

   blas daxpy,ddot
*/

{
    /* internal variables */
    REAL ddot(),t;
    int k,kb,l,nm1;

    nm1 = n - 1;
    if (job == 0) {

        /* job = 0 , solve a * x = b
           first solve l*y = b */

        if (nm1 >= 1) {
            for (k = 0; k < nm1; k++) {
                l = ipvt[k];
                t = b[l];
                if (l != k) {
                    b[l] = b[k];
                    b[k] = t;
                }
                daxpy(n-(k+1),t,&a[lda*k+k+1],1,&b[k+1],1);
            }
        }

        /* now solve u*x = y */

        for (kb = 0; kb < n; kb++) {
            k = n - (kb + 1);
            b[k] = b[k]/a[lda*k+k];
            t = -b[k];
            daxpy(k,t,&a[lda*k+0],1,&b[0],1);
        }
    }
    else {

        /* job = nonzero, solve trans(a) * x = b
           first solve trans(u)*y = b */

        for (k = 0; k < n; k++) {
            t = ddot(k,&a[lda*k+0],1,&b[0],1);
            b[k] = (b[k] - t)/a[lda*k+k];
        }

        /* now solve trans(l)*x = y */

        if (nm1 >= 1) {
            /* kb runs up to nm1 inclusive so that k reaches row 0 */
            for (kb = 1; kb <= nm1; kb++) {
                k = n - (kb+1);
                b[k] = b[k] + ddot(n-(k+1),&a[lda*k+k+1],1,&b[k+1],1);
                l = ipvt[k];
                if (l != k) {
                    t = b[l];
                    b[l] = b[k];
                    b[k] = t;
                }
            }
        }
    }
}

/*----------------------*/
daxpy(n,da,dx,incx,dy,incy)
/* constant times a vector plus a vector.
   jack dongarra, linpack, 3/11/78. */
REAL dx[],dy[],da;
int incx,incy,n;
{
    int i,ix,iy,m;

    if (n <= 0) return;
    if (da == ZERO) return;

    if (incx != 1 || incy != 1) {

        /* code for unequal increments or equal increments
           not equal to 1 */

        ix = 0;
        iy = 0;
        if (incx < 0) ix = (-n+1)*incx;
        if (incy < 0) iy = (-n+1)*incy;
        for (i = 0; i < n; i++) {
            dy[iy] = dy[iy] + da*dx[ix];
            ix = ix + incx;
            iy = iy + incy;
        }
        return;
    }

    /* code for both increments equal to 1 */

#ifdef ROLL
#pragma omp parallel for
    for (i = 0; i < n; i++) {
        dy[i] = dy[i] + da*dx[i];
    }
#endif
#ifdef UNROLL
    m = n % 4;
    if (m != 0) {
        for (i = 0; i < m; i++)
            dy[i] = dy[i] + da*dx[i];
        if (n < 4) return;
    }
#pragma omp parallel for
    for (i = m; i < n; i = i + 4) {
        dy[i] = dy[i] + da*dx[i];
        dy[i+1] = dy[i+1] + da*dx[i+1];
        dy[i+2] = dy[i+2] + da*dx[i+2];
        dy[i+3] = dy[i+3] + da*dx[i+3];
    }
#endif
}

/*----------------------*/
REAL ddot(n,dx,incx,dy,incy)
/* forms the dot product of two vectors.
   jack dongarra, linpack, 3/11/78. */
REAL dx[],dy[];
int incx,incy,n;
{
    REAL dtemp;
    int i,ix,iy,m;

    dtemp = ZERO;

    if (n <= 0) return(ZERO);

    if (incx != 1 || incy != 1) {

        /* code for unequal increments or equal increments
           not equal to 1 */

        ix = 0;
        iy = 0;
        if (incx < 0) ix = (-n+1)*incx;
        if (incy < 0) iy = (-n+1)*incy;
        for (i = 0; i < n; i++) {
            dtemp = dtemp + dx[ix]*dy[iy];
            ix = ix + incx;
            iy = iy + incy;
        }
        return(dtemp);
    }

    /* code for both increments equal to 1;
       the accumulation into dtemp needs a reduction clause,
       otherwise the threads race on it */

#ifdef ROLL
#pragma omp parallel for reduction(+:dtemp)
    for (i = 0; i < n; i++)
        dtemp = dtemp + dx[i]*dy[i];
    return(dtemp);
#endif
#ifdef UNROLL
    m = n % 5;
    if (m != 0) {
        for (i = 0; i < m; i++)
            dtemp = dtemp + dx[i]*dy[i];
        if (n < 5) return(dtemp);
    }
#pragma omp parallel for reduction(+:dtemp)
    for (i = m; i < n; i = i + 5) {
        dtemp = dtemp + dx[i]*dy[i] + dx[i+1]*dy[i+1] +
                dx[i+2]*dy[i+2] + dx[i+3]*dy[i+3] + dx[i+4]*dy[i+4];
    }
    return(dtemp);
#endif
}

/*----------------------*/
dscal(n,da,dx,incx)
/* scales a vector by a constant.
   jack dongarra, linpack, 3/11/78. */
REAL da,dx[];
int n, incx;
{
    int i,m,nincx;

    if (n <= 0) return;
    if (incx != 1) {

        /* code for increment not equal to 1 */

        nincx = n*incx;
        for (i = 0; i < nincx; i = i + incx)
            dx[i] = da*dx[i];
        return;
    }

    /* code for increment equal to 1 */

#ifdef ROLL
#pragma omp parallel for
    for (i = 0; i < n; i++)
        dx[i] = da*dx[i];
#endif
#ifdef UNROLL
    m = n % 5;
    if (m != 0) {
        for (i = 0; i < m; i++)
            dx[i] = da*dx[i];
        if (n < 5) return;
    }
#pragma omp parallel for
    for (i = m; i < n; i = i + 5) {
        dx[i] = da*dx[i];
        dx[i+1] = da*dx[i+1];
        dx[i+2] = da*dx[i+2];
        dx[i+3] = da*dx[i+3];
        dx[i+4] = da*dx[i+4];
    }
#endif
}

/*----------------------*/
int idamax(n,dx,incx)
/* finds the index of element having max. absolute value.
   jack dongarra, linpack, 3/11/78. */
REAL dx[];
int incx,n;
{
    REAL dmax;
    int i, ix, itemp;

    if (n < 1) return(-1);
    if (n == 1) return(0);

    if (incx != 1) {

        /* code for increment not equal to 1 */

        ix = 0;
        itemp = 0;
        dmax = fabs((double)dx[0]);
        ix = ix + incx;
        for (i = 1; i < n; i++) {
            if (fabs((double)dx[ix]) > dmax) {
                itemp = i;
                dmax = fabs((double)dx[ix]);
            }
            ix = ix + incx;
        }
    }
    else {

        /* code for increment equal to 1 */

        itemp = 0;
        dmax = fabs((double)dx[0]);
        for (i = 1; i < n; i++) {
            if (fabs((double)dx[i]) > dmax) {
                itemp = i;
                dmax = fabs((double)dx[i]);
            }
        }
    }
    return (itemp);
}

/*----------------------*/
REAL epslon (x)
REAL x;
/* estimate unit roundoff in quantities of size x. */
{
    REAL a,b,c,eps;

/*
   this program should function properly on all systems satisfying the
   following two assumptions,
     1. the base used in representing floating point numbers is not a
        power of three.
     2. the quantity a in statement 10 is represented to the accuracy
        used in floating point variables that are stored in memory.
   the statement number 10 and the go to 10 are intended to force
   optimizing compilers to generate code satisfying assumption 2.
   under these assumptions, it should be true that,
     a is not exactly equal to four-thirds,
     b has a zero for its last bit or digit,
     c is not exactly equal to one,
     eps measures the separation of 1.0 from the next larger floating
         point number.
   the developers of eispack would appreciate being informed about any
   systems where these assumptions do not hold.

   *****************************************************************
   this routine is one of the auxiliary routines used by eispack iii
   to avoid machine dependencies.
   *****************************************************************

   this version dated 4/6/83.
*/

    a = 4.0e0/3.0e0;
    eps = ZERO;
    while (eps == ZERO) {
        b = a - ONE;
        c = b + b + b;
        eps = fabs((double)(c-ONE));
    }
    return(eps*fabs((double)x));
}

/*----------------------*/
dmxpy (n1, y, n2, ldm, x, m)
REAL y[], x[], m[];
int n1, n2, ldm;

/* We would like to declare m[][ldm], but c does not allow it.  In this
   function, references to m[i][j] are written m[ldm*i+j]. */

/*
   purpose:
     multiply matrix m times vector x and add the result to vector y.

   parameters:

     n1 integer, number of elements in vector y, and number of rows in
        matrix m

     y double [n1], vector of length n1 to which is added the product
       m*x

     n2 integer, number of elements in vector x, and number of columns
        in matrix m

     ldm integer, leading dimension of array m

     x double [n2], vector of length n2

     m double [ldm][n2], matrix of n1 rows and n2 columns

 ----------------------------------------------------------------------
*/
{
    int j,i,jmin;

    /* cleanup odd vector */

    j = n2 % 2;
    if (j >= 1) {
        j = j - 1;
        for (i = 0; i < n1; i++)
            y[i] = (y[i]) + x[j]*m[ldm*j+i];
    }

    /* cleanup odd group of two vectors */

    j = n2 % 4;
    if (j >= 2) {
        j = j - 1;
        for (i = 0; i < n1; i++)
            y[i] = ( (y[i])
                   + x[j-1]*m[ldm*(j-1)+i]) + x[j]*m[ldm*j+i];
    }

    /* cleanup odd group of four vectors */

    j = n2 % 8;
    if (j >= 4) {
        j = j - 1;
        for (i = 0; i < n1; i++)
            y[i] = ((( (y[i])
                   + x[j-3]*m[ldm*(j-3)+i])
                   + x[j-2]*m[ldm*(j-2)+i])
                   + x[j-1]*m[ldm*(j-1)+i]) + x[j]*m[ldm*j+i];
    }

    /* cleanup odd group of eight vectors */

    j = n2 % 16;
    if (j >= 8) {
        j = j - 1;
        for (i = 0; i < n1; i++)
            y[i] = ((((((( (y[i])
                   + x[j-7]*m[ldm*(j-7)+i]) + x[j-6]*m[ldm*(j-6)+i])
                   + x[j-5]*m[ldm*(j-5)+i]) + x[j-4]*m[ldm*(j-4)+i])
                   + x[j-3]*m[ldm*(j-3)+i]) + x[j-2]*m[ldm*(j-2)+i])
                   + x[j-1]*m[ldm*(j-1)+i]) + x[j]  *m[ldm*j+i];
    }

    /* main loop - groups of sixteen vectors */

    jmin = (n2%16)+16;
    for (j = jmin-1; j < n2; j = j + 16) {
        for (i = 0; i < n1; i++)
            y[i] = ((((((((((((((( (y[i])
                   + x[j-15]*m[ldm*(j-15)+i]) + x[j-14]*m[ldm*(j-14)+i])
                   + x[j-13]*m[ldm*(j-13)+i]) + x[j-12]*m[ldm*(j-12)+i])
                   + x[j-11]*m[ldm*(j-11)+i]) + x[j-10]*m[ldm*(j-10)+i])
                   + x[j- 9]*m[ldm*(j- 9)+i]) + x[j- 8]*m[ldm*(j- 8)+i])
                   + x[j- 7]*m[ldm*(j- 7)+i]) + x[j- 6]*m[ldm*(j- 6)+i])
                   + x[j- 5]*m[ldm*(j- 5)+i]) + x[j- 4]*m[ldm*(j- 4)+i])
                   + x[j- 3]*m[ldm*(j- 3)+i]) + x[j- 2]*m[ldm*(j- 2)+i])
                   + x[j- 1]*m[ldm*(j- 1)+i]) + x[j]   *m[ldm*j+i];
    }
}

/*----------------------*/
REAL second()
{
    struct rusage ru;
    REAL t;

    getrusage(RUSAGE_SELF,&ru);
    t = (REAL)(ru.ru_utime.tv_sec + ru.ru_stime.tv_sec)
      + ((REAL)(ru.ru_utime.tv_usec + ru.ru_stime.tv_usec))/1.0e6;
    return t;
}
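/* A worked example of the dgefa/dgesl calling sequence -- a minimal
 * sketch that assumes it is compiled together with this file (so REAL,
 * dgefa and dgesl are visible) with -DSP or -DDP defined.  It follows the
 * pattern from the dgesl comment block: factor once, then solve with
 * job = 0.  The matrix is stored column-major, element (i,j) at
 * a[lda*j+i]; lda may exceed n, which is why the benchmark uses
 * DIMP = 201 for matrices of order up to 200. */
static int demo_solve()
{
    REAL a[9] = { 4, 1, 0,      /* column 0 */
                  1, 4, 1,      /* column 1 */
                  0, 1, 4 };    /* column 2 */
    REAL b[3] = { 6, 12, 14 };  /* right-hand side = a * (1,2,3) */
    int ipvt[3], info;

    dgefa(a,3,3,ipvt,&info);    /* LU factorization, lda == n == 3 here */
    if (info != 0) return info; /* zero pivot: a is singular */
    dgesl(a,3,3,ipvt,b,0);      /* b now holds the solution x = (1,2,3) */
    return 0;
}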
bubbleacc.c
// C program for implementation of Bubble sort
#include <stdio.h>
#include <time.h>
#define getClock() ((double)clock()/CLOCKS_PER_SEC)
#include <omp.h>

double begin, end;

#pragma acc routine seq
void swap(int *xp, int *yp)
{
    int temp = *xp;
    *xp = *yp;
    *yp = temp;
}

// A function to implement bubble sort.
// The classic bubble-sort loops carry a dependency from one comparison to
// the next, so neither loop can be parallelized directly (a collapse(2)
// over them races on arr, and the inner bound depends on i).  The parallel
// variant used here is odd-even transposition sort: within one phase all
// compared pairs are disjoint, so the inner loop is safe to parallelize.
void bubbleSort(int arr[], int n)
{
    int phase, j;
    begin = getClock();
    #pragma acc data copy(arr[0:n])
    for (phase = 0; phase < n; phase++) {
        // pairs (j, j+1) with j of fixed parity never overlap
        #pragma acc parallel loop
        for (j = phase % 2; j < n - 1; j += 2)
            if (arr[j] > arr[j+1])
                swap(&arr[j], &arr[j+1]);
    }
    end = getClock();
}

/* Function to print an array */
void printArray(int arr[], int size)
{
    int i;
    for (i = 0; i < size; i++)
        printf("%d ", arr[i]);
    printf("\n");
}

// Driver program to test above functions
int main()
{
    static int data[100000];    /* static: too large for the stack */
    int i;
    FILE *myFile = fopen("dataset.txt","r");

    if (myFile == NULL) {
        fprintf(stderr, "could not open dataset.txt\n");
        return 1;
    }
    for (i = 0; i < 100000; i++) {
        if (fscanf(myFile, "%d", &data[i]) != 1) {
            fprintf(stderr, "dataset.txt: expected 100000 integers\n");
            fclose(myFile);
            return 1;
        }
    }
    fclose(myFile);

    int n = sizeof(data)/sizeof(data[0]);
    bubbleSort(data, n);
    printf("Sorted array: \n");
    printArray(data, n);
    printf("The bubblesort took %f seconds to complete\t", end - begin);
    return 0;
}
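// For comparison, the same odd-even transposition phase expressed with
// OpenMP instead of OpenACC -- a sketch resting on the same observation:
// each phase only touches disjoint (j, j+1) pairs, so a plain
// parallel-for is race-free.
#include <omp.h>

void oddEvenSortOMP(int arr[], int n)
{
    int phase;
    for (phase = 0; phase < n; phase++) {
        // even phases compare (0,1),(2,3),...; odd phases (1,2),(3,4),...
        #pragma omp parallel for
        for (int j = phase % 2; j < n - 1; j += 2) {
            if (arr[j] > arr[j+1]) {
                int tmp = arr[j];
                arr[j] = arr[j+1];
                arr[j+1] = tmp;
            }
        }
    }
}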